import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    """Build (old_key, new_key) pairs mapping original ViLT checkpoint names to the HuggingFace layout."""
    rename_keys = []
    for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
    return rename_keys

def read_in_q_k_v(state_dict, config):
    """Split each layer's fused QKV projection into separate query/key/value entries."""
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
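
# Hedged illustration (not part of the original script): for hidden size H the timm
# checkpoint stores attn.qkv.weight with shape (3*H, H); rows [0:H] hold the query,
# rows [H:2H] the key, and rows [2H:3H] the value projection, exactly the slices
# taken in read_in_q_k_v above.
def _demo_qkv_split(hidden_size=4):
    qkv = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    query = qkv[: hidden_size, :]
    key = qkv[hidden_size : hidden_size * 2, :]
    value = qkv[-hidden_size:, :]
    assert query.shape == key.shape == value.shape == (hidden_size, hidden_size)
    return query, key, value
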
def remove_classification_head_(state_dict):
    """Drop the original classification head, which has no HuggingFace counterpart."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)

def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val

@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original ViLT checkpoint weights into the HuggingFace ViLT structure."""
    # define configuration and initialize the HuggingFace model
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
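
# Example invocation (the file name matches the script's home in the transformers
# repository, src/transformers/models/vilt/convert_vilt_original_to_pytorch.py; the
# output directory below is a hypothetical placeholder):
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-converted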


# =============================================================================

import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)

@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})

@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )

def handle_metrics(split, metrics, output_dir):
    """Log and save metrics for a given split (train/val/test)."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))

def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics

def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
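
# Example invocation (file name and data layout are assumptions based on the legacy
# seq2seq examples in the transformers repository; the model id and paths are placeholders):
#   python finetune_trainer.py \
#       --model_name_or_path sshleifer/distilbart-xsum-12-3 \
#       --data_dir ./xsum --output_dir ./output \
#       --do_train --do_eval --predict_with_generate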


# =============================================================================

from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict

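
# A minimal sketch of the `ids_tensor` helper used above (the real implementation lives
# in the shared test utilities; the body below is an illustrative assumption, renamed so
# it does not shadow the imported helper):
def _ids_tensor_sketch(shape, vocab_size):
    import numpy as np

    # random integer ids in [0, vocab_size), shaped like the requested input
    return tf.constant(np.random.randint(0, vocab_size, size=shape), dtype=tf.int32)
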
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)

@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)

@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)

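
# A minimal sketch of the sinusoidal table the two tests above check, assuming the
# standard layout RoFormer uses (sin over the first half of the embedding dimension,
# cos over the second half); illustrative, not the library implementation:
def _sinusoidal_table_sketch(num_positions, embedding_dim):
    import numpy as np

    positions = np.arange(num_positions)[:, None]
    dims = np.arange(embedding_dim // 2)[None, :]
    angles = positions / np.power(10000.0, 2 * dims / embedding_dim)
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)


# e.g. _sinusoidal_table_sketch(6, 6)[1] is approximately
# [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000], the row asserted above.
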
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2, 12, 16, 64 are the batch, num_heads, seq_len and head dims of the dummy tensors
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        expected_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        expected_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key_layer, atol=self.tolerance)
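
# Hedged sketch of what "applying rotary position embeddings" means for one feature
# pair (x1, x2) at an angle theta taken from the sinusoidal table: the pair is rotated,
#   x1' = x1 * cos(theta) - x2 * sin(theta)
#   x2' = x2 * cos(theta) + x1 * sin(theta)
# This mirrors the effect of TFRoFormerSelfAttention.apply_rotary_position_embeddings on
# each adjacent pair of head dimensions; treat it as an illustration, not the exact source.
def _rotate_pair_sketch(x1, x2, theta):
    import math

    return (
        x1 * math.cos(theta) - x2 * math.sin(theta),
        x2 * math.cos(theta) + x1 * math.sin(theta),
    )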


# =============================================================================

import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16,
        hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0,
        qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1,
        hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02,
        layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10,
        encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            embed_dim=self.embed_dim, hidden_sizes=self.hidden_sizes, depths=self.depths,
            num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
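
    # Worked example with the tester defaults (image_size=32, patch_size=2, embed_dim=16,
    # depths=[1, 2, 1]): (32 // 2) ** 2 = 256 patches, downsampled by 4 ** (3 - 1) = 16
    # gives an expected sequence length of 16, with a final dim of 16 * 2 ** (3 - 1) = 64.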
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        # FocalNetBackbone (the last class) has no input/output embeddings
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)

@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
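
# Hedged usage sketch (the checkpoint id appears in the integration test above; the
# rest is illustrative and kept commented out so this file stays import-safe):
#   from transformers import FocalNetBackbone
#   backbone = FocalNetBackbone.from_pretrained("microsoft/focalnet-tiny", out_features=["stage1", "stage2"])
#   feature_maps = backbone(pixel_values).feature_maps  # one feature map per requested stage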


# =============================================================================

def simplify(current_set: list[list]) -> list[list]:
    """Recursively reduce the augmented matrix toward row echelon form."""
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in the form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set

def solve_simultaneous(equations: list[list]) -> list:
    """Solve a system of n linear equations given as n rows of n+1 coefficients."""
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
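    # For the system above each equation reads x_i + (x_1 + ... + x_5) = rhs with
    # rhs = 4..8, so the solver should print [-1.0, 0.0, 1.0, 2.0, 3.0]; the single
    # equation 4x = 2 yields [0.5].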


# =============================================================================

class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    """Pick items in descending key_func order while the cumulative weight fits in max_cost."""
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)

def A_ ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
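
# Minimal usage sketch (illustrative menu; assumes the definitions above):
# fill a 500-unit weight budget by best value-to-weight ratio.
def _greedy_example() -> None:
    foods = build_menu(["Burger", "Pizza", "Coca Cola"], [80, 100, 60], [40, 60, 40])
    taken, total_value = greedy(foods, 500, Things.value_weight)
    print(taken, total_value)  # all three items fit, total value 240.0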
"""Tabu search for a travelling-salesman-style problem on a weighted edge list."""
import argparse
import copy


def generate_neighbours(path):
    """Parse an edge-list file (``node_a node_b distance`` per line) into a
    dictionary mapping each node to its [neighbour, distance] pairs."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour starting from the first node in
    the file; return the tour and its total distance."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """Generate every tour reachable by swapping two interior nodes; each
    candidate gets its total distance appended as its last element, and the
    list is sorted by that distance."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for `iters` iterations with a tabu list capped at
    `size` entries; return the best tour found and its cost."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
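
# Illustrative input format for the script above: one undirected edge per line
# as "<node> <node> <distance>", e.g. a hypothetical tsp.txt containing
#
#   a b 20
#   a c 18
#   b c 10
#
# Node names must be single characters here, since the start node is read with
# f.read(1). Example run (script name illustrative; flags as defined above):
#
#   python tabu_search.py -f tsp.txt -i 100 -s 5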
"""Lazy import structure for the Conditional DETR model."""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_conditional_detr'] = ['ConditionalDetrFeatureExtractor']
    _import_structure['image_processing_conditional_detr'] = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_conditional_detr'] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
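
# Usage sketch: with the _LazyModule registration above, importing a symbol
# from this package only loads the heavy torch/vision submodule on first
# access, e.g. (assuming the usual transformers package layout):
#
#   from transformers import ConditionalDetrConfig  # no torch import triggered
#   from transformers import ConditionalDetrModel   # loads the torch-backed module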
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-large-en-ro': 1_024,
'facebook/nllb-200-distilled-600M': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" NLLB tokenizer (backed by HuggingFace's tokenizers library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix and suffix=[eos, src_lang_code].
        - In default mode: prefix=[src_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: no prefix and suffix=[eos, tgt_lang_code].
        - In default mode: prefix=[tgt_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
"""Tests for the packaged CSV module of `datasets`."""
import os
import textwrap

import pyarrow as pa
import pytest

from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv

from ..utils import require_pil


@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    # calling a feature instance returns its Arrow storage type
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
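
# Sketch of the converters hook exercised above: Csv forwards `converters` to
# pandas.read_csv, so a whitespace-separated column can be parsed into Python
# lists at load time (file name illustrative):
#
#   csv = Csv(encoding="utf-8", converters={"int_list": lambda x: [int(i) for i in x.split()]})
#   tables = [table for _, table in csv._generate_tables([["data.csv"]])]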
"""Lazy import structure for Transformer-XL."""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_transfo_xl'] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_transfo_xl'] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""Fast tests for the IF inpainting super-resolution pipeline."""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
        default=512,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    def parse_bool(string: str) -> bool:
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'''could not parse string as bool {string}''' )
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
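
# Example invocation (script name and file paths are illustrative; the flags
# match the parser defined above):
#
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path control_sd15_canny.pth \
#       --original_config_file cldm_v15.yaml \
#       --dump_path ./controlnet-canny \
#       --to_safetensors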
"""Evaluate the validation loss/perplexity of a causal LM on a streamed dataset."""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    """Concatenate tokenized examples (separated by the BOS token) and yield
    fixed-length chunks of `seq_length` token ids."""

    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
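
# Standalone sketch of the dataset wrapper (toy data; assumes a tokenizer with
# a bos_token_id, e.g. GPT-2):
#
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   docs = [{"content": "def add(a, b):\n    return a + b"}] * 100
#   ds = ConstantLengthDataset(tok, docs, seq_length=128)
#   first = next(iter(ds))  # a 1-D tensor of exactly 128 token ids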
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
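
# Usage sketch (requires the transformers tools runtime and Pillow; the image
# path and caption are illustrative):
#
#   from PIL import Image
#   tool = ImageCaptioningTool()
#   caption = tool(Image.open("photo.jpg"))  # e.g. "a dog sitting on a couch"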
"""Fast tokenizer class for LED (byte-level BPE, backed by tokenizers)."""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/led-base-16384': 1_6384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
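
# Sketch of the LED-specific padding behaviour above (checkpoint taken from the
# pretrained map; lengths illustrative):
#
#   tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   enc = tok("a long document ...")
#   enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
#   padded = tok.pad(enc, padding="max_length", max_length=32)
#   # global_attention_mask is extended with -1 (local attention), not 0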
# Iterated digit-sum sequence: a(1) = 1 and a(n + 1) = a(n) + digit_sum(a(n)).
# Terms are stored as a little-endian digit list, and large blocks of steps are
# memoized, keyed by the digit sum of the high part of the number.
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    """Advance a_i (digits of the current term, least significant first) from
    index i towards index n, jumping via memoized results where possible.
    Returns (sum of addends applied, number of terms advanced)."""
    # a_i = b * 10^k + c; ds_b is digitsum(b)
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    """Advance a_i term by term from index i to at most index n, stopping early
    once a carry spills past the first k digits."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Add `addend` into the digit list in place, starting at index k, with carries."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """Return the n-th term of the sequence."""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f'{solution() = }')
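
# Naive cross-check for small n (assumes solution() above): generate the
# sequence directly. For example naive_solution(10) == 62, which the memoized
# solution(10) should reproduce.
def naive_solution(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a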
"""Convert ViT and DeiT checkpoints from the timm library to the HuggingFace format."""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    """Produce (old_name, new_name) pairs mapping timm parameter names to HF ViT names."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projection into separate query/key/value weights."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop timm's classification head weights from the state dict."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move dct[old] to dct[new]."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO cats image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm ViT/DeiT checkpoint into the HF ViT structure."""
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
_lowerCamelCase : List[Any] = 192
_lowerCamelCase : Optional[int] = 768
_lowerCamelCase : Union[str, Any] = 12
_lowerCamelCase : Optional[Any] = 3
elif vit_name[9:].startswith("small" ):
_lowerCamelCase : Optional[Any] = 384
_lowerCamelCase : Optional[Any] = 1536
_lowerCamelCase : int = 12
_lowerCamelCase : List[str] = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
_lowerCamelCase : List[str] = 768
_lowerCamelCase : Optional[Any] = 2304
_lowerCamelCase : List[Any] = 8
_lowerCamelCase : List[Any] = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
_lowerCamelCase : List[Any] = 1024
_lowerCamelCase : Optional[Any] = 4096
_lowerCamelCase : List[Any] = 24
_lowerCamelCase : Union[str, Any] = 16
elif vit_name[4:].startswith("huge" ):
_lowerCamelCase : str = 1280
_lowerCamelCase : List[Any] = 5120
_lowerCamelCase : List[str] = 32
_lowerCamelCase : List[str] = 16
# load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
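
# Example invocation (script name and output directory are illustrative; the
# timm model name must match the patterns parsed in convert_vit_checkpoint):
#
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224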
"""Solve the electrical impedance triangle Z^2 = R^2 + X^2 for the missing quantity."""
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Given any two of resistance (R), reactance (X) and impedance (Z), with
    the unknown passed as 0, return the missing quantity from Z^2 = R^2 + X^2."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
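
# Worked example: with R = 3 and X = 4, Z = sqrt(3**2 + 4**2) = 5, so
#
#   electrical_impedance(3, 4, 0)  ->  {'impedance': 5.0}
#   electrical_impedance(0, 4, 5)  ->  {'resistance': 3.0}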
"""Find a pair of pentagonal numbers whose sum and difference are both
pentagonal, and return that difference (a Project Euler style search)."""


def is_pentagonal(n: int) -> bool:
    """n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is a positive integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f'{solution() = }')
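
# Sanity check for the closed form in is_pentagonal: the first pentagonal
# numbers P(n) = n(3n - 1)/2 are 1, 5, 12, 22, 35, ...
def _is_pentagonal_check() -> None:
    assert all(is_pentagonal(p) for p in [1, 5, 12, 22, 35])
    assert not any(is_pentagonal(q) for q in [2, 3, 4, 6, 7])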
"""`datasets` Audio feature: stores audio as bytes or a path and decodes lazily."""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict


if TYPE_CHECKING:
    from .features import FeatureType


_ffmpeg_warned, _librosa_warned, _audioread_warned = False, False, False


@dataclass
class Audio:
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don't have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(
        self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
    ) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
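
# Minimal usage sketch for the feature above (the file path is hypothetical;
# the flow follows the encode_example/decode_example pair defined in this class):
#
#   from datasets import Dataset, Audio
#
#   ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]})
#   ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
#   sample = ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}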
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
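
# Effect of the `_LazyModule` registration above (a sketch of typical usage,
# not additional module code): importing the package stays cheap, and the
# heavy torch/TF submodules are only loaded on first attribute access.
#
#   from transformers.models.mobilebert import MobileBertConfig  # no torch import yet
#   from transformers.models.mobilebert import MobileBertModel   # triggers modeling_mobilebert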
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Bezier curve evaluation and plotting using the Bernstein polynomial basis."""
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
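
    # Spot check of the Bernstein-basis evaluation above (a minimal sketch
    # using the class as defined): on the degree-1 curve through (1, 2) and
    # (3, 5), t = 0.5 lands exactly on the midpoint of the control points.
    assert BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5) == (2.0, 3.5)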
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
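
# How this is typically launched (assumed from the standard accelerate examples
# workflow; the script filename is illustrative):
#
#   accelerate launch memory.py --mixed_precision fp16
#
# `find_executable_batch_size` catches CUDA out-of-memory errors raised inside
# `inner_training_loop`, halves the batch size, and re-invokes the loop until a
# batch size fits on the device.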
"""Dummy object raising a helpful error when transformers/torch/note_seq are missing."""
from ..utils import DummyObject, requires_backends


class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
"""Close or mark stale inactive issues on the huggingface/accelerate repository."""
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
if __name__ == "__main__":
main()
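
# This script expects a GITHUB_TOKEN environment variable and is normally run
# on a schedule; a minimal GitHub Actions trigger might look like this
# (assumed setup, not part of this file):
#
#   on:
#     schedule:
#       - cron: "0 15 * * *"
#   env:
#     GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}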
"""Tokenization tests for the CodeGen slow and fast tokenizers."""
import json
import os
import re
import unittest

from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"
        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    def test_padding_different_model_input_name(self):
        pass
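
# The suite above relies on the shared TokenizerTesterMixin fixtures; a typical
# invocation (assumed standard pytest usage from a transformers checkout):
#
#   python -m pytest tests/models/codegen/test_tokenization_codegen.py -k "padding"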
"""Task template aligning a dataset's columns with the ASR input/label schema."""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
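
# Sketch of how a task template like the one above is applied (method name as
# in the `datasets` task API; the dataset `ds` is hypothetical):
#
#   ds = ds.prepare_for_task("automatic-speech-recognition")
#   # columns are renamed/cast so that ds.features match
#   # {"audio": Audio(), "transcription": Value("string")}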
"""Training helpers used by the accelerate test suite (regression toys and mocked MRPC dataloaders)."""
import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
__A : str = logging.get_logger(__name__)
__A : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
__A : Union[str, Any] = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
__A : List[str] = {
'allenai/led-base-16384': 16_384,
}
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
__magic_name__ : Dict = VOCAB_FILES_NAMES
__magic_name__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : str = LEDTokenizer
__magic_name__ : List[str] = ["""input_ids""", """attention_mask"""]
def __init__( self : Union[str, Any] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : str=None , UpperCamelCase__ : str=None , UpperCamelCase__ : Optional[int]="replace" , UpperCamelCase__ : Union[str, Any]="<s>" , UpperCamelCase__ : Union[str, Any]="</s>" , UpperCamelCase__ : Any="</s>" , UpperCamelCase__ : Optional[int]="<s>" , UpperCamelCase__ : List[str]="<unk>" , UpperCamelCase__ : str="<pad>" , UpperCamelCase__ : Tuple="<mask>" , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Optional[int]=True , **UpperCamelCase__ : Optional[int] , ):
super().__init__(
__A , __A , tokenizer_file=__A , errors=__A , bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , unk_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , trim_offsets=__A , **__A , )
A__ : List[str] =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __A ) != add_prefix_space:
A__ : str =getattr(__A , pre_tok_state.pop("type" ) )
A__ : List[Any] =add_prefix_space
A__ : Tuple =pre_tok_class(**__A )
A__ : Optional[int] =add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A__ : List[str] ="post_processor"
A__ : int =getattr(self.backend_tokenizer , __A , __A )
if tokenizer_component_instance:
A__ : Tuple =json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
A__ : str =tuple(state["sep"] )
if "cls" in state:
A__ : List[str] =tuple(state["cls"] )
A__ : Dict =False
if state.get("add_prefix_space" , __A ) != add_prefix_space:
A__ : List[str] =add_prefix_space
A__ : List[Any] =True
if state.get("trim_offsets" , __A ) != trim_offsets:
A__ : List[str] =trim_offsets
A__ : List[str] =True
if changes_to_apply:
A__ : Tuple =getattr(__A , state.pop("type" ) )
A__ : Any =component_class(**__A )
setattr(self.backend_tokenizer , __A , __A )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _UpperCAmelCase ( self : str ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : str ):
A__ : Optional[Any] =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else value
A__ : str =value
def _UpperCAmelCase ( self : List[str] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : int ):
A__ : List[str] =kwargs.get("is_split_into_words" , __A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__A , **__A )
def _UpperCAmelCase ( self : Optional[int] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Union[str, Any] ):
A__ : List[Any] =kwargs.get("is_split_into_words" , __A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._encode_plus(*__A , **__A )
def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
A__ : List[str] =self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str]=None ):
A__ : Optional[Any] =[self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
A__ : Tuple =[self.sep_token_id]
A__ : Union[str, Any] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCAmelCase ( self : Any , UpperCamelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , ):
A__ : List[str] =super()._pad(
encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , )
# Load from model defaults
if return_attention_mask is None:
A__ : Any ="attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
A__ : Union[str, Any] =encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
A__ : Optional[Any] =len(encoded_inputs["global_attention_mask"] ) != len(__A )
if needs_to_be_padded:
A__ : str =len(__A ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
A__ : Tuple =(
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
A__ : int =[-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
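
# Why `_pad` special-cases `global_attention_mask` (a usage sketch; the
# checkpoint is the real LED base model, the input text is a placeholder, and
# the snippet assumes torch is available):
#
#   tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   enc = tokenizer("a very long document ...", return_tensors="pt")
#   global_attention_mask = torch.zeros_like(enc["input_ids"])
#   global_attention_mask[:, 0] = 1  # first token attends globally
#   # padded positions must use -1, since 0 already means "local attention"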
"""MobileNetV2 model configuration and ONNX export configuration."""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
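
# Instantiation sketch (field names as defined above; the width/resolution pair
# mirrors the google/mobilenet_v2_0.35_96 checkpoint listed in the archive map):
#
#   config = MobileNetV2Config(depth_multiplier=0.35, image_size=96)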
"""GLPN model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forward_generator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save/load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
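# The fast tests above can be run in isolation with, e.g. (the path is
# illustrative and depends on the repository layout):
#   python -m pytest -k "InpaintingSuperResolution" tests/pipelines/deepfloyd_if/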
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
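# A minimal usage sketch for the feature extractor above, assuming the defaults
# (the random waveform is a stand-in for one second of real 16 kHz mono speech):
if __name__ == "__main__":
    extractor = MCTCTFeatureExtractor()
    waveform = np.random.randn(16000).astype(np.float32)
    features = extractor(waveform, sampling_rate=16000)["input_features"]
    print(features[0].shape)  # (num_frames, feature_size), roughly (98, 80)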
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
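# A minimal concrete command, as a sketch of how the abstract base above is meant
# to be subclassed. The command name "hello" and its behaviour are illustrative
# assumptions, not part of the original file:
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello")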
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
rename_keys_prefix = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
ACCEPTABLE_CHECKPOINTS = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original checkpoint's weights into our VisualBERT structure.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
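# Example invocation (the script name and paths are illustrative):
#   python convert_visual_bert_checkpoint.py nlvr2_fine_tuned.th ./visualbert-nlvr2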
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(caplog, csv_file, malformed_csv_file):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """
    Return the minimum cost of 1-day, 7-day and 30-day passes (``costs``)
    needed to travel on every day listed in ``days`` (day numbers in 1..365).

    >>> mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15])
    11
    >>> mincost_tickets([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 7, 15])
    17
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list of summary lines."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and\n"
            " seventy-five.\n\nSpiritual revelations were conceded to England at that\n"
            " favoured period, as at this."
        )
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns empty collections of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    """
    Copy/paste/tweak the original checkpoint's weights to our MaskFormer structure.
    """
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        '--model_name',
        default='maskformer-swin-tiny-ade',
        type=str,
        help='Name of the MaskFormer model you\'d like to convert',
    )
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase_ : int = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
import os
import sys

import transformers

# Reduce TensorFlow logging noise before TF is (possibly) imported below.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    """
    Calculates and updates a_i in place to either the n-th term or the smallest
    term for which c > 10^k when the terms are written in the form
    a(i) = b * 10^k + c. Returns the difference from the starting term and the
    number of terms jumped, as (diff, dn).
    """
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    """Computes terms sequentially until reaching the n-th term or until c > 10^k."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Adds addend to the digit array given in digits, starting at index k."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """
    Returns the n-th term of the sequence a(1) = 1; a(i) is a(i - 1) plus the
    sum of the digits of a(i - 1). The digits of the result are accumulated in
    little-endian order in ``digits``.
    """
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
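# A brute-force cross-check for small n (assumption: solution(n) is meant to
# return the n-th term of the sequence a(1) = 1, a(k+1) = a(k) + digitsum(a(k))),
# useful when modifying the jump-table logic above:
def _brute_force(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(digit) for digit in str(a))
    return a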
if __name__ == "__main__":
print(f'''{solution() = }''')
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated token sequence into a (possibly nested) JSON-like dict."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
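# A small illustration of what `token2json` produces (tag names and values are
# made up for the example):
#   "<s_name>latte</s_name><s_price>5</s_price>"
#     -> {"name": "latte", "price": "5"}
# Leaves separated by "<sep/>" become lists, and nested <s_...> tags become
# nested dictionaries.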
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Logs the metrics for one split and saves them to ``output_dir/{split}_results.json``."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    # See all possible arguments in src/transformers/training_args.py, or by
    # passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 44 | 0 |
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
_snake_case = input('''Enter image url: ''').strip()
print(F"Downloading image from {url} ...")
_snake_case = BeautifulSoup(requests.get(url).content, '''html.parser''')
# The image URL is in the content field of the first meta tag with property og:image
_snake_case = soup.find('''meta''', {'''property''': '''og:image'''})['content']
_snake_case = requests.get(image_url).content
_snake_case = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(F"Done. Image saved to disk as {file_name}.")
| 282 |
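# The script above relies on the Open Graph convention: a page advertises its
# preview image in a <meta property="og:image" content="..."> tag. Below is a
# minimal, hedged sketch of the same lookup with basic error handling; the
# function name and the timeout are illustrative choices, not taken from the
# original script.
import requests
from bs4 import BeautifulSoup


def fetch_og_image_url(page_url: str) -> str:
    html = requests.get(page_url, timeout=10).content
    soup = BeautifulSoup(html, "html.parser")
    meta = soup.find("meta", {"property": "og:image"})
    if meta is None or not meta.get("content"):
        raise ValueError("page does not declare an og:image meta tag")
    return meta["content"]

# Side note on the snippet's file name: %H:%M:%S produces ':' characters,
# which are invalid in Windows file names; %H-%M-%S is a more portable choice.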
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase__ :
def __init__( self : List[Any],__A : str,__A : List[str]=1_3,__A : str=3_2,__A : Tuple=2,__A : Any=3,__A : Dict=1_6,__A : Dict=[3_2, 6_4, 1_2_8],__A : List[str]=[1, 2, 1],__A : str=[2, 2, 4],__A : Optional[int]=2,__A : Dict=2.0,__A : str=True,__A : Tuple=0.0,__A : int=0.0,__A : List[str]=0.1,__A : Any="gelu",__A : List[Any]=False,__A : Optional[Any]=True,__A : List[str]=0.02,__A : Tuple=1e-5,__A : Any=True,__A : Tuple=None,__A : Tuple=True,__A : Tuple=1_0,__A : List[Any]=8,__A : Optional[int]=["stage1", "stage2"],__A : int=[1, 2],):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Optional[int] = image_size
_lowerCamelCase : int = patch_size
_lowerCamelCase : Optional[Any] = num_channels
_lowerCamelCase : int = embed_dim
_lowerCamelCase : int = hidden_sizes
_lowerCamelCase : List[Any] = depths
_lowerCamelCase : Any = num_heads
_lowerCamelCase : List[str] = window_size
_lowerCamelCase : str = mlp_ratio
_lowerCamelCase : Any = qkv_bias
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : List[str] = drop_path_rate
_lowerCamelCase : str = hidden_act
_lowerCamelCase : Union[str, Any] = use_absolute_embeddings
_lowerCamelCase : List[Any] = patch_norm
_lowerCamelCase : Tuple = layer_norm_eps
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : Tuple = scope
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : int = type_sequence_label_size
_lowerCamelCase : Tuple = encoder_stride
_lowerCamelCase : Any = out_features
_lowerCamelCase : Any = out_indices
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Union[str, Any] ):
return FocalNetConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,embed_dim=self.embed_dim,hidden_sizes=self.hidden_sizes,depths=self.depths,num_heads=self.num_heads,window_size=self.window_size,mlp_ratio=self.mlp_ratio,qkv_bias=self.qkv_bias,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,drop_path_rate=self.drop_path_rate,hidden_act=self.hidden_act,use_absolute_embeddings=self.use_absolute_embeddings,path_norm=self.patch_norm,layer_norm_eps=self.layer_norm_eps,initializer_range=self.initializer_range,encoder_stride=self.encoder_stride,out_features=self.out_features,out_indices=self.out_indices,)
def lowerCamelCase_ ( self : int,__A : Union[str, Any],__A : Tuple,__A : List[Any] ):
_lowerCamelCase : Optional[Any] = FocalNetModel(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[Any] = model(__A )
_lowerCamelCase : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCamelCase : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, expected_seq_len, expected_dim) )
def lowerCamelCase_ ( self : int,__A : Optional[int],__A : int,__A : Optional[int] ):
_lowerCamelCase : Any = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ),len(config.out_features ) )
self.parent.assertListEqual(model.channels,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCamelCase : List[str] = None
_lowerCamelCase : List[str] = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ),1 )
self.parent.assertListEqual(model.channels,[config.hidden_sizes[-1]] )
def lowerCamelCase_ ( self : Optional[int],__A : Optional[int],__A : Dict,__A : Dict ):
_lowerCamelCase : List[Any] = FocalNetForMaskedImageModeling(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
self.parent.assertEqual(
result.reconstruction.shape,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCamelCase : Dict = 1
_lowerCamelCase : Any = FocalNetForMaskedImageModeling(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Optional[int] = model(__A )
self.parent.assertEqual(result.reconstruction.shape,(self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase_ ( self : List[Any],__A : Union[str, Any],__A : List[Any],__A : Optional[Any] ):
_lowerCamelCase : Union[str, Any] = self.type_sequence_label_size
_lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[int] = model(__A,labels=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : str = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = model(__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : int = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = FocalNetModelTester(self )
_lowerCamelCase : int = ConfigTester(self,config_class=__A,embed_dim=3_7,has_text_modality=__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : List[str] ):
return
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def lowerCamelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def lowerCamelCase_ ( self : List[str] ):
pass
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : str = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(),(nn.Module) )
_lowerCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A,nn.Linear ) )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : Union[str, Any] = model_class(__A )
_lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : int = [*signature.parameters.keys()]
_lowerCamelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1],__A )
def lowerCamelCase_ ( self : Tuple,__A : Any,__A : List[Any],__A : str,__A : Any ):
_lowerCamelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**self._prepare_for_class(__A,__A ) )
_lowerCamelCase : Optional[int] = outputs.hidden_states
_lowerCamelCase : int = getattr(
self.model_tester,"expected_num_hidden_layers",len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__A ),__A )
# FocalNet has a different seq_length
_lowerCamelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
_lowerCamelCase : Any = outputs.reshaped_hidden_states
self.assertEqual(len(__A ),__A )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = reshaped_hidden_states[0].shape
_lowerCamelCase : List[str] = (
reshaped_hidden_states[0].view(__A,__A,height * width ).permute(0,2,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Tuple = 3
_lowerCamelCase : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCamelCase : int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Optional[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
@slow
def lowerCamelCase_ ( self : Tuple ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Dict = FocalNetModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = _config_zero_init(__A )
for model_class in self.all_model_classes:
_lowerCamelCase : Any = model_class(config=__A )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),[0.0, 1.0],msg=f'Parameter {name} of model {model_class} seems not properly initialized',)
@require_vision
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Union[str, Any] ):
# TODO update organization
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(__A )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_lowerCamelCase : Dict = image_processor(images=__A,return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__A )
# verify the logits
_lowerCamelCase : List[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape,__A )
_lowerCamelCase : List[str] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__A,atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item(),2_8_1 )
@require_torch
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase_ = FocalNetConfig
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = FocalNetModelTester(self )
| 44 | 0 |
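# The FocalNet model tester above asserts that last_hidden_state has shape
# (batch_size, expected_seq_len, expected_dim). A worked sketch of that
# arithmetic with the tester's default hyperparameters; the formula is the one
# used in the test, the concrete numbers are simply evaluated here:
image_size, patch_size = 32, 2
depths, embed_dim = [1, 2, 1], 16

num_patches = (image_size // patch_size) ** 2             # 16 * 16 = 256 tokens
num_downsamples = len(depths) - 1                         # two stages halve H and W
expected_seq_len = num_patches // (4 ** num_downsamples)  # 256 // 16 = 16
expected_dim = embed_dim * 2 ** num_downsamples           # 16 * 4 = 64

assert (expected_seq_len, expected_dim) == (16, 64)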
'''simple docstring'''
import math
import sys
def UpperCAmelCase__ ( UpperCAmelCase_ : str ) -> Optional[int]:
__lowerCamelCase : Dict = ""
try:
with open(_lowerCAmelCase , 'rb' ) as binary_file:
__lowerCamelCase : List[Any] = binary_file.read()
for dat in data:
__lowerCamelCase : Optional[Any] = F'{dat:08b}'
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__ ( UpperCAmelCase_ : str ) -> str:
__lowerCamelCase : Optional[int] = {"0": "0", "1": "1"}
    __lowerCamelCase , __lowerCamelCase : List[str] = "", ""
__lowerCamelCase : List[Any] = len(_lowerCAmelCase )
for i in range(len(_lowerCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__lowerCamelCase : Optional[Any] = lexicon[curr_string]
result += last_match_id
__lowerCamelCase : List[str] = last_match_id + "0"
if math.loga(_lowerCAmelCase ).is_integer():
__lowerCamelCase : Dict = {}
for curr_key in list(_lowerCAmelCase ):
__lowerCamelCase : Union[str, Any] = lexicon.pop(_lowerCAmelCase )
__lowerCamelCase : List[str] = new_lex
__lowerCamelCase : Union[str, Any] = last_match_id + "1"
index += 1
__lowerCamelCase : Union[str, Any] = ""
return result
def UpperCAmelCase__ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ) -> Any:
__lowerCamelCase : str = 8
try:
with open(_lowerCAmelCase , 'wb' ) as opened_file:
__lowerCamelCase : int = [
to_write[i : i + byte_length]
for i in range(0 , len(_lowerCAmelCase ) , _lowerCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(_lowerCAmelCase , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__ ( UpperCAmelCase_ : str ) -> Dict:
__lowerCamelCase : Optional[Any] = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
__lowerCamelCase : Tuple = data_bits[counter:]
__lowerCamelCase : int = data_bits[counter + 1 :]
return data_bits
def UpperCAmelCase__ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ) -> Tuple:
__lowerCamelCase : Dict = read_file_binary(_lowerCAmelCase )
__lowerCamelCase : List[str] = remove_prefix(_lowerCAmelCase )
__lowerCamelCase : List[Any] = decompress_data(_lowerCAmelCase )
write_file_binary(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 13 |
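# The decoder above follows the classic LZW scheme for bit strings: the
# lexicon starts as {"0": "0", "1": "1"} and, whenever its size reaches a
# power of two, every existing key gains a leading "0" so that code words grow
# by one bit. A readable, self-contained sketch of that loop; this is the
# textbook structure of such a decoder, and the variable names and the
# bin()-based code words are assumptions, not recovered from the renamed
# original:
import math


def lzw_decompress_bits(data_bits: str) -> str:
    lexicon = {"0": "0", "1": "1"}
    curr_string, result = "", ""
    index = len(lexicon)
    for bit in data_bits:
        curr_string += bit
        if curr_string not in lexicon:      # keep accumulating bits
            continue
        last_match = lexicon[curr_string]
        result += last_match
        lexicon[curr_string] = last_match + "0"
        if math.log2(index).is_integer():   # lexicon size reached 2 ** k
            lexicon = {"0" + key: value for key, value in lexicon.items()}
        lexicon[bin(index)[2:]] = last_match + "1"
        index += 1
        curr_string = ""
    return result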
'''simple docstring'''
class UpperCAmelCase__ :
def __init__( self : Any,__A : Any,__A : Any,__A : Any ):
_lowerCamelCase : List[Any] = name
_lowerCamelCase : Union[str, Any] = value
_lowerCamelCase : str = weight
def __repr__( self : Any ):
return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def lowerCamelCase_ ( self : Optional[int] ):
return self.value
def lowerCamelCase_ ( self : Any ):
return self.name
def lowerCamelCase_ ( self : List[Any] ):
return self.weight
def lowerCamelCase_ ( self : str ):
return self.value / self.weight
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : str = []
for i in range(len(_lowerCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = sorted(_lowerCAmelCase , key=_lowerCAmelCase , reverse=_lowerCAmelCase )
_lowerCamelCase : Optional[int] = []
_lowerCamelCase , _lowerCamelCase : Optional[int] = 0.0, 0.0
for i in range(len(_lowerCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def A_ ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 | 0 |
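# A self-contained usage sketch of the greedy-by-key selection implemented
# above: sort items by a key function in descending order and take every item
# that still fits under max_cost. The names and the menu data below are
# illustrative, not the original's.
from typing import Callable, List, Tuple


class Thing:
    def __init__(self, name: str, value: float, weight: float) -> None:
        self.name, self.value, self.weight = name, value, weight


def greedy(items: List[Thing], max_cost: float,
           key: Callable[[Thing], float]) -> Tuple[List[Thing], float]:
    picked, total_cost, total_value = [], 0.0, 0.0
    for item in sorted(items, key=key, reverse=True):  # best key first
        if total_cost + item.weight <= max_cost:
            picked.append(item)
            total_cost += item.weight
            total_value += item.value
    return picked, total_value


menu = [Thing("pizza", 100, 60), Thing("burger", 80, 40), Thing("salad", 30, 10)]
chosen, value = greedy(menu, 70, key=lambda t: t.value)
print([t.name for t in chosen], value)  # ['pizza', 'salad'] 130.0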
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class _SCREAMING_SNAKE_CASE:
def __init__( self : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ :Optional[Any] = {}
def __lowerCamelCase ( self : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int]=1 ) -> List[str]:
if self.graph.get(__A ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
SCREAMING_SNAKE_CASE__ :Tuple = [[w, v]]
if not self.graph.get(__A ):
SCREAMING_SNAKE_CASE__ :Tuple = []
def __lowerCamelCase ( self : Optional[int] ) -> str:
return list(self.graph )
def __lowerCamelCase ( self : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] ) -> List[Any]:
if self.graph.get(__A ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__A )
def __lowerCamelCase ( self : int , UpperCamelCase_ : Any=-2 , UpperCamelCase_ : Optional[int]=-1 ) -> str:
if s == d:
return []
SCREAMING_SNAKE_CASE__ :Optional[Any] = []
SCREAMING_SNAKE_CASE__ :int = []
if s == -2:
SCREAMING_SNAKE_CASE__ :str = list(self.graph )[0]
stack.append(__A )
visited.append(__A )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
SCREAMING_SNAKE_CASE__ :Tuple = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__A )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__A ) != 0:
SCREAMING_SNAKE_CASE__ :List[str] = stack[len(__A ) - 1]
else:
SCREAMING_SNAKE_CASE__ :int = ss
            # check if we have reached the starting point
if len(__A ) == 0:
return visited
def __lowerCamelCase ( self : Union[str, Any] , UpperCamelCase_ : Optional[Any]=-1 ) -> Any:
if c == -1:
SCREAMING_SNAKE_CASE__ :List[str] = floor(random() * 1_00_00 ) + 10
for i in range(__A ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
SCREAMING_SNAKE_CASE__ :Dict = floor(random() * c ) + 1
if n != i:
self.add_pair(__A , __A , 1 )
def __lowerCamelCase ( self : Union[str, Any] , UpperCamelCase_ : Dict=-2 ) -> List[str]:
SCREAMING_SNAKE_CASE__ :Tuple = deque()
SCREAMING_SNAKE_CASE__ :Any = []
if s == -2:
SCREAMING_SNAKE_CASE__ :Optional[Any] = list(self.graph )[0]
d.append(__A )
visited.append(__A )
while d:
SCREAMING_SNAKE_CASE__ :Optional[Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __lowerCamelCase ( self : Dict , UpperCamelCase_ : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ :int = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def __lowerCamelCase ( self : Optional[Any] , UpperCamelCase_ : Dict ) -> Any:
return len(self.graph[u] )
def __lowerCamelCase ( self : Optional[int] , UpperCamelCase_ : Dict=-2 ) -> List[Any]:
SCREAMING_SNAKE_CASE__ :Union[str, Any] = []
SCREAMING_SNAKE_CASE__ :List[str] = []
if s == -2:
SCREAMING_SNAKE_CASE__ :Tuple = list(self.graph )[0]
stack.append(__A )
visited.append(__A )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = s
SCREAMING_SNAKE_CASE__ :Optional[int] = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
SCREAMING_SNAKE_CASE__ :Optional[int] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
SCREAMING_SNAKE_CASE__ :str = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__A ) != 0:
SCREAMING_SNAKE_CASE__ :Tuple = stack[len(__A ) - 1]
else:
SCREAMING_SNAKE_CASE__ :List[str] = ss
            # check if we have reached the starting point
if len(__A ) == 0:
return sorted_nodes
def __lowerCamelCase ( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ :List[Any] = []
SCREAMING_SNAKE_CASE__ :str = []
SCREAMING_SNAKE_CASE__ :Tuple = list(self.graph )[0]
stack.append(__A )
visited.append(__A )
SCREAMING_SNAKE_CASE__ :int = -2
SCREAMING_SNAKE_CASE__ :str = []
SCREAMING_SNAKE_CASE__ :List[Any] = s
SCREAMING_SNAKE_CASE__ :str = False
SCREAMING_SNAKE_CASE__ :Optional[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
SCREAMING_SNAKE_CASE__ :Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
SCREAMING_SNAKE_CASE__ :List[Any] = len(__A ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
SCREAMING_SNAKE_CASE__ :Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
SCREAMING_SNAKE_CASE__ :str = True
if len(__A ) != 0:
SCREAMING_SNAKE_CASE__ :Union[str, Any] = stack[len(__A ) - 1]
else:
SCREAMING_SNAKE_CASE__ :int = False
indirect_parents.append(__A )
SCREAMING_SNAKE_CASE__ :int = s
SCREAMING_SNAKE_CASE__ :Dict = ss
            # check if we have reached the starting point
if len(__A ) == 0:
return list(__A )
def __lowerCamelCase ( self : int ) -> int:
SCREAMING_SNAKE_CASE__ :Optional[Any] = []
SCREAMING_SNAKE_CASE__ :List[Any] = []
SCREAMING_SNAKE_CASE__ :Any = list(self.graph )[0]
stack.append(__A )
visited.append(__A )
SCREAMING_SNAKE_CASE__ :List[str] = -2
SCREAMING_SNAKE_CASE__ :Dict = []
SCREAMING_SNAKE_CASE__ :Dict = s
SCREAMING_SNAKE_CASE__ :Union[str, Any] = False
SCREAMING_SNAKE_CASE__ :List[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
SCREAMING_SNAKE_CASE__ :Any = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
SCREAMING_SNAKE_CASE__ :List[Any] = len(__A ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
SCREAMING_SNAKE_CASE__ :List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
SCREAMING_SNAKE_CASE__ :Any = True
if len(__A ) != 0:
SCREAMING_SNAKE_CASE__ :str = stack[len(__A ) - 1]
else:
SCREAMING_SNAKE_CASE__ :Any = False
indirect_parents.append(__A )
SCREAMING_SNAKE_CASE__ :Optional[int] = s
SCREAMING_SNAKE_CASE__ :Any = ss
            # check if we have reached the starting point
if len(__A ) == 0:
return False
def __lowerCamelCase ( self : List[Any] , UpperCamelCase_ : Any=-2 , UpperCamelCase_ : Union[str, Any]=-1 ) -> Tuple:
SCREAMING_SNAKE_CASE__ :List[Any] = time()
self.dfs(__A , __A )
SCREAMING_SNAKE_CASE__ :Any = time()
return end - begin
def __lowerCamelCase ( self : Union[str, Any] , UpperCamelCase_ : List[str]=-2 ) -> Dict:
SCREAMING_SNAKE_CASE__ :List[Any] = time()
self.bfs(__A )
SCREAMING_SNAKE_CASE__ :List[Any] = time()
return end - begin
class _SCREAMING_SNAKE_CASE:
def __init__( self : str ) -> str:
SCREAMING_SNAKE_CASE__ :Optional[int] = {}
def __lowerCamelCase ( self : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Tuple=1 ) -> int:
        # check if u already exists in the graph
if self.graph.get(__A ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
SCREAMING_SNAKE_CASE__ :Union[str, Any] = [[w, v]]
# add the other way
if self.graph.get(__A ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
SCREAMING_SNAKE_CASE__ :Tuple = [[w, u]]
def __lowerCamelCase ( self : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[int] ) -> List[str]:
if self.graph.get(__A ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__A )
# the other way round
if self.graph.get(__A ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(__A )
def __lowerCamelCase ( self : str , UpperCamelCase_ : List[str]=-2 , UpperCamelCase_ : List[Any]=-1 ) -> Any:
if s == d:
return []
SCREAMING_SNAKE_CASE__ :Any = []
SCREAMING_SNAKE_CASE__ :List[Any] = []
if s == -2:
SCREAMING_SNAKE_CASE__ :int = list(self.graph )[0]
stack.append(__A )
visited.append(__A )
SCREAMING_SNAKE_CASE__ :str = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
SCREAMING_SNAKE_CASE__ :Optional[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__A )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
SCREAMING_SNAKE_CASE__ :str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__A ) != 0:
SCREAMING_SNAKE_CASE__ :Optional[Any] = stack[len(__A ) - 1]
else:
SCREAMING_SNAKE_CASE__ :Any = ss
            # check if we have reached the starting point
if len(__A ) == 0:
return visited
def __lowerCamelCase ( self : str , UpperCamelCase_ : List[str]=-1 ) -> int:
if c == -1:
SCREAMING_SNAKE_CASE__ :Union[str, Any] = floor(random() * 1_00_00 ) + 10
for i in range(__A ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
SCREAMING_SNAKE_CASE__ :Any = floor(random() * c ) + 1
if n != i:
self.add_pair(__A , __A , 1 )
def __lowerCamelCase ( self : Optional[int] , UpperCamelCase_ : int=-2 ) -> List[str]:
SCREAMING_SNAKE_CASE__ :List[str] = deque()
SCREAMING_SNAKE_CASE__ :Optional[Any] = []
if s == -2:
SCREAMING_SNAKE_CASE__ :Optional[int] = list(self.graph )[0]
d.append(__A )
visited.append(__A )
while d:
SCREAMING_SNAKE_CASE__ :Union[str, Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __lowerCamelCase ( self : Optional[int] , UpperCamelCase_ : Optional[Any] ) -> Union[str, Any]:
return len(self.graph[u] )
def __lowerCamelCase ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ :Optional[int] = []
SCREAMING_SNAKE_CASE__ :Optional[int] = []
SCREAMING_SNAKE_CASE__ :Optional[int] = list(self.graph )[0]
stack.append(__A )
visited.append(__A )
SCREAMING_SNAKE_CASE__ :Tuple = -2
SCREAMING_SNAKE_CASE__ :Dict = []
SCREAMING_SNAKE_CASE__ :Any = s
SCREAMING_SNAKE_CASE__ :Union[str, Any] = False
SCREAMING_SNAKE_CASE__ :List[str] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
SCREAMING_SNAKE_CASE__ :Any = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
SCREAMING_SNAKE_CASE__ :Optional[int] = len(__A ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
SCREAMING_SNAKE_CASE__ :Dict = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
SCREAMING_SNAKE_CASE__ :int = True
if len(__A ) != 0:
SCREAMING_SNAKE_CASE__ :int = stack[len(__A ) - 1]
else:
SCREAMING_SNAKE_CASE__ :str = False
indirect_parents.append(__A )
SCREAMING_SNAKE_CASE__ :Optional[int] = s
SCREAMING_SNAKE_CASE__ :str = ss
            # check if we have reached the starting point
if len(__A ) == 0:
return list(__A )
def __lowerCamelCase ( self : int ) -> List[Any]:
SCREAMING_SNAKE_CASE__ :Dict = []
SCREAMING_SNAKE_CASE__ :List[str] = []
SCREAMING_SNAKE_CASE__ :Tuple = list(self.graph )[0]
stack.append(__A )
visited.append(__A )
SCREAMING_SNAKE_CASE__ :str = -2
SCREAMING_SNAKE_CASE__ :Optional[Any] = []
SCREAMING_SNAKE_CASE__ :Any = s
SCREAMING_SNAKE_CASE__ :int = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
SCREAMING_SNAKE_CASE__ :Any = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
SCREAMING_SNAKE_CASE__ :int = len(__A ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
SCREAMING_SNAKE_CASE__ :Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
SCREAMING_SNAKE_CASE__ :int = True
if len(__A ) != 0:
SCREAMING_SNAKE_CASE__ :List[str] = stack[len(__A ) - 1]
else:
SCREAMING_SNAKE_CASE__ :Any = False
indirect_parents.append(__A )
SCREAMING_SNAKE_CASE__ :Tuple = s
SCREAMING_SNAKE_CASE__ :int = ss
            # check if we have reached the starting point
if len(__A ) == 0:
return False
def __lowerCamelCase ( self : Dict ) -> Tuple:
return list(self.graph )
def __lowerCamelCase ( self : Optional[int] , UpperCamelCase_ : Optional[Any]=-2 , UpperCamelCase_ : Any=-1 ) -> Tuple:
SCREAMING_SNAKE_CASE__ :Dict = time()
self.dfs(__A , __A )
SCREAMING_SNAKE_CASE__ :Tuple = time()
return end - begin
def __lowerCamelCase ( self : Dict , UpperCamelCase_ : Union[str, Any]=-2 ) -> Tuple:
SCREAMING_SNAKE_CASE__ :Optional[int] = time()
self.bfs(__A )
SCREAMING_SNAKE_CASE__ :Dict = time()
return end - begin
| 209 |
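# Both graph classes above store an adjacency list of the form
# {u: [[weight, v], ...]} and traverse it with an explicit stack (DFS) or a
# deque (BFS). A compact, self-contained sketch of the same traversal pattern
# on that representation; the names and the sample graph are illustrative:
from collections import deque

adjacency = {0: [[1, 1], [1, 2]], 1: [[1, 3]], 2: [[1, 3]], 3: []}


def bfs(adj: dict, start: int) -> list:
    order, queue = [start], deque([start])
    while queue:
        node = queue.popleft()
        for _weight, nxt in adj[node]:
            if nxt not in order:  # visit every vertex once
                order.append(nxt)
                queue.append(nxt)
    return order


print(bfs(adjacency, 0))  # [0, 1, 2, 3]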
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ : List[Any] = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = ['ConditionalDetrFeatureExtractor']
UpperCAmelCase_ : str = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
__A : str = logging.get_logger(__name__)
def lowercase ( UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
A__ : Dict =UniSpeechSatForSequenceClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
A__ : Dict =downstream_dict["projector.weight"]
A__ : Tuple =downstream_dict["projector.bias"]
A__ : str =downstream_dict["model.post_net.linear.weight"]
A__ : Tuple =downstream_dict["model.post_net.linear.bias"]
return model
def lowercase ( UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ):
"""simple docstring"""
A__ : Union[str, Any] =UniSpeechSatForAudioFrameClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
A__ : Optional[int] =downstream_dict["model.linear.weight"]
A__ : Optional[int] =downstream_dict["model.linear.bias"]
return model
def lowercase ( UpperCamelCase : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : Any ):
"""simple docstring"""
A__ : int =UniSpeechSatForXVector.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
A__ : List[Any] =downstream_dict["connector.weight"]
A__ : List[str] =downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
A__ : List[Any] =downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
A__ : int =downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
A__ : Optional[int] =downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
A__ : Tuple =downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
A__ : List[Any] =downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
A__ : Tuple =downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
A__ : Tuple =downstream_dict["objective.W"]
return model
@torch.no_grad()
def lowercase ( UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : List[str] ):
"""simple docstring"""
A__ : Tuple =torch.load(_lowerCAmelCase , map_location="cpu" )
A__ : str =checkpoint["Downstream"]
A__ : str =UniSpeechSatConfig.from_pretrained(_lowerCAmelCase )
A__ : Optional[Any] =WavaVecaFeatureExtractor.from_pretrained(
_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , do_normalize=_lowerCAmelCase )
A__ : Optional[Any] =hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
A__ : List[str] =convert_classification(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
elif arch.endswith("ForAudioFrameClassification" ):
A__ : Optional[int] =convert_diarization(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
elif arch.endswith("ForXVector" ):
A__ : Dict =convert_xvector(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
A__ : List[str] =checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(_lowerCAmelCase )
hf_model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__A : str = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
__A : Union[str, Any] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 656 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = tmp_path / "file.csv"
_lowerCamelCase : Optional[int] = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = tmp_path / "malformed_file.csv"
_lowerCamelCase : Any = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20,\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : int = tmp_path / "csv_with_image.csv"
_lowerCamelCase : int = textwrap.dedent(
F'\\n image\n {image_file}\n ' )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_label.csv"
_lowerCamelCase : int = textwrap.dedent(
"\\n label\n good\n bad\n good\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_int_list.csv"
_lowerCamelCase : Any = textwrap.dedent(
"\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[Any] = Csv()
_lowerCamelCase : Any = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(_lowerCAmelCase , match="Error tokenizing data" ):
for _ in generator:
pass
assert any(
record.levelname == "ERROR"
and "Failed to read file" in record.message
and os.path.basename(_lowerCAmelCase ) in record.message
for record in caplog.records )
@require_pil
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : Any = f.read().splitlines()[1]
_lowerCamelCase : Optional[Any] = Csv(encoding="utf-8" , features=Features({"image": Image()} ) )
_lowerCamelCase : Union[str, Any] = csv._generate_tables([[csv_file_with_image]] )
_lowerCamelCase : List[str] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("image" ).type == Image()()
_lowerCamelCase : int = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : List[Any] = f.read().splitlines()[1:]
_lowerCamelCase : int = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) )
_lowerCamelCase : Tuple = csv._generate_tables([[csv_file_with_label]] )
_lowerCamelCase : int = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )()
_lowerCamelCase : Union[str, Any] = pa_table.to_pydict()["label"]
assert generated_content == [ClassLabel(names=["good", "bad"] ).straint(_lowerCAmelCase ) for label in labels]
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
    _lowerCamelCase : Dict = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda x: [int(i) for i in x.split()]} )
_lowerCamelCase : List[Any] = csv._generate_tables([[csv_file_with_int_list]] )
_lowerCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("int_list" ).type )
_lowerCamelCase : Optional[Any] = pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 44 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 125 |
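# This __init__ and the conditional_detr one above both route imports through
# _LazyModule, so heavy backends (torch, tokenizers, sentencepiece) are only
# imported when a symbol is first accessed. A minimal sketch of the underlying
# idea using module-level __getattr__ (PEP 562); this shows the general
# pattern, not transformers' actual _LazyModule implementation:
import importlib

_import_structure = {"modeling": ["MyModel"], "tokenization": ["MyTokenizer"]}
_attr_to_module = {
    attr: module for module, attrs in _import_structure.items() for attr in attrs
}


def __getattr__(name):
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(name)
    module = importlib.import_module("." + module_name, __name__)
    return getattr(module, name)  # `from pkg import MyModel` resolves here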
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = IFInpaintingSuperResolutionPipeline
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
lowerCAmelCase_ = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCamelCase_ ( self : List[str] ):
return self._get_superresolution_dummy_components()
def lowerCamelCase_ ( self : str,__A : List[str],__A : List[str]=0 ):
if str(__A ).startswith("mps" ):
_lowerCamelCase : List[str] = torch.manual_seed(__A )
else:
_lowerCamelCase : Optional[int] = torch.Generator(device=__A ).manual_seed(__A )
_lowerCamelCase : List[Any] = floats_tensor((1, 3, 1_6, 1_6),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Any = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Tuple = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Dict = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),reason="XFormers attention is only available with CUDA and `xformers` installed",)
def lowerCamelCase_ ( self : Optional[int] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowerCamelCase_ ( self : Dict ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda",reason="float16 requires CUDA" )
def lowerCamelCase_ ( self : Optional[Any] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowerCamelCase_ ( self : Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCamelCase_ ( self : Dict ):
self._test_save_load_local()
def lowerCamelCase_ ( self : Any ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2,)
| 44 | 0 |
import unittest
from transformers import DonutProcessor
lowerCamelCase_ : List[Any] = 'naver-clova-ix/donut-base'
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: List[str] = DonutProcessor.from_pretrained(__A )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Tuple = {
"name": "John Doe",
"age": "99",
"city": "Atlanta",
"state": "GA",
"zip": "30301",
"phone": "123-4567",
"nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
}
UpperCamelCase_: List[str] = (
"<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
"<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
"<s_nicknames><s_nickname>Johnny</s_nickname>"
"<sep/><s_nickname>JD</s_nickname></s_nicknames>"
)
UpperCamelCase_: Optional[int] = self.processor.tokenajson(__A )
self.assertDictEqual(__A , __A )
| 548 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCAmelCase__ ( A ):
def __init__( self : List[Any],__A : Tuple,__A : Optional[int],__A : Optional[int]=1_0_2_4,__A : int=1_0_2_4,__A : Any=3.6 ):
_lowerCamelCase : List[str] = tokenizer
_lowerCamelCase : Dict = tokenizer.bos_token_id
_lowerCamelCase : Tuple = dataset
_lowerCamelCase : Any = seq_length
_lowerCamelCase : List[Any] = seq_length * chars_per_token * num_of_sequences
def __iter__( self : Tuple ):
_lowerCamelCase : Union[str, Any] = iter(self.dataset )
_lowerCamelCase : str = True
while more_examples:
_lowerCamelCase , _lowerCamelCase : Optional[int] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(__A )["content"] )
buffer_len += len(buffer[-1] )
except StopIteration:
_lowerCamelCase : Tuple = False
break
_lowerCamelCase : int = tokenizer(__A,truncation=__A )["input_ids"]
_lowerCamelCase : int = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0,len(__A ),self.seq_length ):
_lowerCamelCase : List[str] = all_token_ids[i : i + self.seq_length]
if len(__A ) == self.seq_length:
yield torch.tensor(__A )
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {"streaming": True}
_lowerCamelCase : Optional[Any] = load_dataset(args.dataset_name , split="train" , **_lowerCAmelCase )
_lowerCamelCase : int = ConstantLengthDataset(_lowerCAmelCase , _lowerCAmelCase , seq_length=args.seq_length )
_lowerCamelCase : Dict = DataLoader(_lowerCAmelCase , batch_size=args.batch_size )
return eval_dataloader
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
model.eval()
_lowerCamelCase : Optional[int] = []
for step, batch in enumerate(_lowerCAmelCase ):
with torch.no_grad():
_lowerCamelCase : List[str] = model(_lowerCAmelCase , labels=_lowerCAmelCase )
_lowerCamelCase : List[Any] = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(_lowerCAmelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
_lowerCamelCase : Dict = torch.mean(torch.cat(_lowerCAmelCase ) )
try:
_lowerCamelCase : List[Any] = torch.exp(_lowerCAmelCase )
except OverflowError:
_lowerCamelCase : Optional[int] = float("inf" )
return loss.item(), perplexity.item()
# Setup Accelerator
UpperCAmelCase_ : List[str] = Accelerator()
# Parse configuration
UpperCAmelCase_ : Tuple = HfArgumentParser(EvaluationArguments)
UpperCAmelCase_ : Dict = parser.parse_args()
set_seed(args.seed)
# Logging
UpperCAmelCase_ : Optional[int] = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
UpperCAmelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
UpperCAmelCase_ : int = create_dataloader(args)
# Prepare everything with our `accelerator`.
UpperCAmelCase_, UpperCAmelCase_ : Dict = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
UpperCAmelCase_, UpperCAmelCase_ : str = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 44 | 0 |
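# The evaluate() loop above reduces to: gather the per-batch losses, average
# them, and report perplexity = exp(mean loss), falling back to inf on
# overflow. A tiny standalone sketch of that reduction (the loss values are
# made up; note that torch.exp overflows to inf silently, while math.exp
# raises OverflowError, which is what the except branch mirrors):
import math

losses = [2.31, 2.27, 2.40]            # per-batch eval losses
mean_loss = sum(losses) / len(losses)
try:
    perplexity = math.exp(mean_loss)
except OverflowError:
    perplexity = float("inf")
print(round(mean_loss, 4), round(perplexity, 4))  # mean ~ 2.3267, ppl ~ 10.24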
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # create a single random channels-first uint8 image and convert it to PIL
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
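# --- Usage sketch (illustrative, not part of the original test file). Builds
# the same processor as setUp() above and runs it on one random image; the
# tiny checkpoint name is taken from setUp, everything else is an assumption.
def _demo_blip2_processor():
    import numpy as np
    from PIL import Image
    from transformers import Blip2Processor, BlipImageProcessor, GPT2Tokenizer

    tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
    processor = Blip2Processor(BlipImageProcessor(), tokenizer)

    image = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))
    inputs = processor(text="lower newer", images=image, return_tensors="np")
    # the processor merges both modalities into one BatchFeature
    assert sorted(inputs.keys()) == ["attention_mask", "input_ids", "pixel_values"]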
| 570 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
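# --- Usage sketch (illustrative; assumes the public "allenai/led-base-16384"
# checkpoint). Shows the `_pad` override above in action: a user-provided
# `global_attention_mask` is padded with -1 alongside `input_ids`.
def _demo_led_global_attention_padding():
    tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    enc = tok(["short", "a somewhat longer sentence"], padding=False)
    # mark the first token of each sequence for global attention
    enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
    padded = tok.pad(enc, padding="longest")
    # every row now has equal length; padded positions carry -1
    assert len({len(m) for m in padded["global_attention_mask"]}) == 1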
| 44 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
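# --- Usage sketch (illustrative, not part of the file): `_LazyModule` replaces
# this module in `sys.modules`, so the heavy torch-dependent import only
# happens on first attribute access, resolved from `_import_structure`.
def _demo_lazy_import():
    import importlib

    trocr = importlib.import_module("transformers.models.trocr")
    config_cls = trocr.TrOCRConfig        # cheap: configuration module only
    model_cls = trocr.TrOCRForCausalLM    # first access imports modeling_trocr
    return config_cls, model_cls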
| 112 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify the conversion on an image of two cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm ViT checkpoint into the HuggingFace ViT structure."""
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
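# Example invocation (illustrative; the script filename is an assumption based
# on this file's contents -- any timm ViT/DeiT name with the expected
# "<arch>_patch<P>_<S>" suffix should work):
#
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224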
| 44 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
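# --- Usage sketch (illustrative; `model` stands for any Flax noise-prediction
# network with this call signature -- an assumption, not part of this file).
# Shows the create_state / set_timesteps / step protocol of the scheduler.
def _demo_sampling_loop(model, params, shape=(1, 64, 64, 3)):
    scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps=50)

    key = jax.random.PRNGKey(0)
    sample = jax.random.normal(key, shape) * state.init_noise_sigma
    for t in state.timesteps:
        model_output = model(sample, t, params)  # predicts epsilon by default
        key, step_key = jax.random.split(key)
        sample, state = scheduler.step(state, model_output, t, sample, key=step_key, return_dict=False)
    return sample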
| 596 |
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    """Return True if `n` is a pentagonal number."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Project Euler 44: find a pentagonal pair whose sum and difference are pentagonal."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
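# Why the `is_pentagonal` test works (derivation, added for clarity):
# P(k) = k(3k - 1) / 2, so 3k^2 - k - 2n = 0 when n = P(k). Solving the
# quadratic for k gives k = (1 + sqrt(1 + 24n)) / 6; n is pentagonal exactly
# when that k is a positive integer, i.e. when the expression has no
# fractional part. Quick check:
#
#   >>> [n for n in range(1, 40) if is_pentagonal(n)]
#   [1, 5, 12, 22, 35]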
| 44 | 0 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
graph_bwd = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
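# Worked example (added for clarity): on the graphs defined above, the path
# E -> B -> C -> D -> F costs 1 + 1 + 1 + 1 = 4, while E -> G -> F costs
# 2 + 1 = 3, so the bidirectional search returns 3:
#
#   >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
#   3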
| 262 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 44 | 0 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()


@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}


@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content


def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 321 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
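# Note (added for clarity): `basis_function` evaluates the Bernstein basis
# b_{i,n}(t) = C(n, i) * (1 - t)**(n - i) * t**i with n = degree. For degree 1
# at t = 0.5 the basis is [0.5, 0.5], so the curve point is the midpoint of
# the two control points:
#
#   >>> BezierCurve([(1, 1), (1, 2)]).basis_function(0.5)
#   [0.5, 0.5]
#   >>> BezierCurve([(1, 1), (1, 2)]).bezier_curve_function(0.5)
#   (1.0, 1.5)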
| 44 | 0 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 282 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
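# Note (added for clarity): `DummyObject` is a metaclass that turns every
# attribute access on the class into a call to `requires_backends`, which
# raises an ImportError with install instructions instead of a confusing
# AttributeError. Illustrative use (the pipeline name mirrors the dummy
# class above and is an assumption):
#
#   from diffusers import SpectrogramDiffusionPipeline
#   SpectrogramDiffusionPipeline.from_pretrained("...")
#   # -> ImportError if transformers/torch/note_seq are not installed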
| 44 | 0 |
'''simple docstring'''
def circle_sort(collection: list) -> list:
    """Sort a list in place using the circle-sort algorithm."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
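# Worked example (added for clarity): circle sort repeatedly compares and
# swaps elements mirrored around the centre of the range, then recurses on
# both halves, until a full pass makes no swaps.
#
#   >>> circle_sort([5, 3, 1, 4, 2])
#   [1, 2, 3, 4, 5]
#   >>> circle_sort([])
#   []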
| 13 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = CodeGenTokenizer
lowerCAmelCase_ = CodeGenTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = {'add_prefix_space': True}
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase : Dict = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_lowerCamelCase : Any = dict(zip(__A,range(len(__A ) ) ) )
_lowerCamelCase : Optional[int] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_lowerCamelCase : Tuple = {"unk_token": "<unk>"}
_lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : Dict = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file,"w",encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
def lowerCamelCase_ ( self : Dict,**__A : Tuple ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : Union[str, Any],**__A : int ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : str,__A : Dict ):
_lowerCamelCase : Optional[Any] = "lower newer"
_lowerCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : int = CodeGenTokenizer(self.vocab_file,self.merges_file,**self.special_tokens_map )
_lowerCamelCase : Any = "lower newer"
_lowerCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_lowerCamelCase : List[Any] = tokenizer.tokenize(__A,add_prefix_space=__A )
self.assertListEqual(__A,__A )
_lowerCamelCase : Union[str, Any] = tokens + [tokenizer.unk_token]
_lowerCamelCase : Dict = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : Any ):
if not self.test_rust_tokenizer:
return
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCamelCase : Union[str, Any] = "lower newer"
# Testing tokenization
_lowerCamelCase : List[Any] = tokenizer.tokenize(__A,add_prefix_space=__A )
_lowerCamelCase : str = rust_tokenizer.tokenize(__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids without special tokens
_lowerCamelCase : str = tokenizer.encode(__A,add_special_tokens=__A,add_prefix_space=__A )
_lowerCamelCase : List[str] = rust_tokenizer.encode(__A,add_special_tokens=__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids with special tokens
_lowerCamelCase : List[Any] = self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCamelCase : Union[str, Any] = tokenizer.encode(__A,add_prefix_space=__A )
_lowerCamelCase : Optional[int] = rust_tokenizer.encode(__A )
self.assertListEqual(__A,__A )
# Testing the unknown token
_lowerCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token]
_lowerCamelCase : Optional[Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : Tuple,*__A : Any,**__A : Any ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def lowerCamelCase_ ( self : int,__A : Optional[int]=1_5 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(__A,**__A )
# Simple input
_lowerCamelCase : Dict = "This is a simple input"
_lowerCamelCase : Any = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : Tuple = ("This is a simple input", "This is a pair")
_lowerCamelCase : Tuple = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__A,tokenizer_r.encode,__A,max_length=__A,padding="max_length" )
# Simple input
self.assertRaises(__A,tokenizer_r.encode_plus,__A,max_length=__A,padding="max_length" )
# Simple input
self.assertRaises(
__A,tokenizer_r.batch_encode_plus,__A,max_length=__A,padding="max_length",)
# Pair input
self.assertRaises(__A,tokenizer_r.encode,__A,max_length=__A,padding="max_length" )
# Pair input
self.assertRaises(__A,tokenizer_r.encode_plus,__A,max_length=__A,padding="max_length" )
# Pair input
self.assertRaises(
__A,tokenizer_r.batch_encode_plus,__A,max_length=__A,padding="max_length",)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname,pad_token="<pad>" )
# Simple input
_lowerCamelCase : Tuple = "This is a simple input"
_lowerCamelCase : Dict = ["This is a simple input looooooooong", "This is a simple input"]
_lowerCamelCase : Dict = ("This is a simple input", "This is a pair")
_lowerCamelCase : Dict = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_lowerCamelCase : Dict = tokenizer.pad_token_id
_lowerCamelCase : Dict = tokenizer(__A,padding="max_length",max_length=3_0,return_tensors="np" )
_lowerCamelCase : int = tokenizer(__A,padding=__A,truncate=__A,return_tensors="np" )
_lowerCamelCase : List[Any] = tokenizer(*__A,padding="max_length",max_length=6_0,return_tensors="np" )
_lowerCamelCase : Tuple = tokenizer(__A,padding=__A,truncate=__A,return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1],3_0 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1],3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1],6_0 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1],5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[Any] = "$$$"
_lowerCamelCase : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname,bos_token=__A,add_bos_token=__A )
_lowerCamelCase : List[str] = "This is a simple input"
_lowerCamelCase : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : Union[str, Any] = tokenizer.bos_token_id
_lowerCamelCase : Any = tokenizer(__A )
_lowerCamelCase : List[str] = tokenizer(__A )
self.assertEqual(out_s.input_ids[0],__A )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCamelCase : int = tokenizer.decode(out_s.input_ids )
_lowerCamelCase : str = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0],__A )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : int = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
_lowerCamelCase : Optional[Any] = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
_lowerCamelCase : Dict = "\nif len_a > len_b: result = a\nelse: result = b"
_lowerCamelCase : Any = tokenizer.encode(__A )
_lowerCamelCase : str = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
_lowerCamelCase : List[Any] = tokenizer.decode(__A,truncate_before_pattern=__A )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : Any ):
pass
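# --- Illustrative sketch (added; not part of the original test class) ---
# A minimal, self-contained demo of the two padding strategies the tests
# above exercise. "gpt2" is an assumed stand-in checkpoint; any tokenizer
# with a pad token behaves the same way.
def _padding_strategies_demo():
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("gpt2")
    tok.pad_token = tok.eos_token  # GPT-2 ships without a pad token
    texts = ["This is a simple input looooooooong", "This is a simple input"]
    # padding="max_length" pads every sequence to exactly max_length tokens
    fixed = tok(texts, padding="max_length", max_length=30, return_tensors="np")
    assert fixed["input_ids"].shape[-1] == 30
    # padding=True ("longest") pads only up to the longest sequence in the batch
    dynamic = tok(texts, padding=True, return_tensors="np")
    # the shorter sequence carries pad ids and zeroed attention-mask entries
    assert tok.pad_token_id in dynamic["input_ids"][1]
    assert 0 in dynamic["attention_mask"][1]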
| 44 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def lowerCamelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int]=False ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE__ :Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any]=False ) -> List[Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE__ :int = ""
else:
SCREAMING_SNAKE_CASE__ :int = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ :Any = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
SCREAMING_SNAKE_CASE__ :Tuple = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ :List[str] = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE__ :List[str] = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE__ :int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE__ :List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE__ :Any = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE__ :List[str] = in_proj_bias[-config.hidden_size :]
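# --- Illustrative sketch (added; not part of the original script) ---
# The function above slices timm's fused qkv projection into separate
# query/key/value tensors. The same slicing on a toy tensor, with a made-up
# hidden size, showing that the three slices tile the fused matrix exactly:
def _demo_qkv_split(hidden_size=4):
    qkv_weight = torch.randn(3 * hidden_size, hidden_size)  # fused [q; k; v]
    q = qkv_weight[:hidden_size, :]
    k = qkv_weight[hidden_size : 2 * hidden_size, :]
    v = qkv_weight[-hidden_size:, :]
    assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)
    return q, k, v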
def lowerCamelCase ( UpperCAmelCase__ : Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = dct.pop(_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = val
def lowerCamelCase ( ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE__ :Optional[int] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = ViTConfig()
SCREAMING_SNAKE_CASE__ :List[str] = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
SCREAMING_SNAKE_CASE__ :Optional[Any] = True
SCREAMING_SNAKE_CASE__ :Optional[Any] = int(vit_name[-1_2:-1_0] )
SCREAMING_SNAKE_CASE__ :str = int(vit_name[-9:-6] )
else:
SCREAMING_SNAKE_CASE__ :List[Any] = 1_0_0_0
SCREAMING_SNAKE_CASE__ :str = "huggingface/label-files"
SCREAMING_SNAKE_CASE__ :Any = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE__ :int = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE__ :str = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ :Optional[Any] = idalabel
SCREAMING_SNAKE_CASE__ :List[str] = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ :List[str] = int(vit_name[-6:-4] )
SCREAMING_SNAKE_CASE__ :str = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('tiny' ):
SCREAMING_SNAKE_CASE__ :List[Any] = 1_9_2
SCREAMING_SNAKE_CASE__ :Optional[int] = 7_6_8
SCREAMING_SNAKE_CASE__ :Union[str, Any] = 1_2
SCREAMING_SNAKE_CASE__ :Optional[Any] = 3
elif vit_name[9:].startswith('small' ):
SCREAMING_SNAKE_CASE__ :Optional[Any] = 3_8_4
SCREAMING_SNAKE_CASE__ :Optional[Any] = 1_5_3_6
SCREAMING_SNAKE_CASE__ :int = 1_2
SCREAMING_SNAKE_CASE__ :List[str] = 6
else:
pass
else:
if vit_name[4:].startswith('small' ):
SCREAMING_SNAKE_CASE__ :List[str] = 7_6_8
SCREAMING_SNAKE_CASE__ :Optional[Any] = 2_3_0_4
SCREAMING_SNAKE_CASE__ :List[Any] = 8
SCREAMING_SNAKE_CASE__ :List[Any] = 8
elif vit_name[4:].startswith('base' ):
pass
elif vit_name[4:].startswith('large' ):
SCREAMING_SNAKE_CASE__ :List[Any] = 1_0_2_4
SCREAMING_SNAKE_CASE__ :Optional[Any] = 4_0_9_6
SCREAMING_SNAKE_CASE__ :List[Any] = 2_4
SCREAMING_SNAKE_CASE__ :Union[str, Any] = 1_6
elif vit_name[4:].startswith('huge' ):
SCREAMING_SNAKE_CASE__ :str = 1_2_8_0
SCREAMING_SNAKE_CASE__ :List[Any] = 5_1_2_0
SCREAMING_SNAKE_CASE__ :List[str] = 3_2
SCREAMING_SNAKE_CASE__ :List[str] = 1_6
# load original model from timm
SCREAMING_SNAKE_CASE__ :int = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE__ :Any = timm_model.state_dict()
if base_model:
remove_classification_head_(_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ :Optional[int] = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
SCREAMING_SNAKE_CASE__ :int = ViTModel(_lowerCAmelCase ).eval()
else:
SCREAMING_SNAKE_CASE__ :List[str] = ViTForImageClassification(_lowerCAmelCase ).eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
SCREAMING_SNAKE_CASE__ :Union[str, Any] = DeiTImageProcessor(size=config.image_size )
else:
SCREAMING_SNAKE_CASE__ :Union[str, Any] = ViTImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE__ :Optional[int] = image_processor(images=prepare_img() , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ :Optional[int] = encoding["pixel_values"]
SCREAMING_SNAKE_CASE__ :Union[str, Any] = model(_lowerCAmelCase )
if base_model:
SCREAMING_SNAKE_CASE__ :int = timm_model.forward_features(_lowerCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_lowerCAmelCase , outputs.pooler_output , atol=1e-3 )
else:
SCREAMING_SNAKE_CASE__ :Union[str, Any] = timm_model(_lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1e-3 )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_patch16_224''',
type=str,
help='''Name of the ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
UpperCamelCase_ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 209 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class UpperCAmelCase__ :
def __init__( self : Any,__A : int=2,__A : Any=3,__A : Optional[int]=6_4,__A : Tuple=None ):
_lowerCamelCase : int = np.random.default_rng(__A )
_lowerCamelCase : List[str] = length
_lowerCamelCase : Optional[Any] = rng.normal(size=(length,) ).astype(np.floataa )
_lowerCamelCase : Optional[int] = a * self.x + b + rng.normal(scale=0.1,size=(length,) ).astype(np.floataa )
def __len__( self : Dict ):
return self.length
def __getitem__( self : str,__A : List[str] ):
return {"x": self.x[i], "y": self.y[i]}
class UpperCAmelCase__ ( torch.nn.Module ):
def __init__( self : Union[str, Any],__A : Optional[Any]=0,__A : Optional[int]=0,__A : Dict=False ):
super().__init__()
_lowerCamelCase : Tuple = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowerCamelCase : List[str] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowerCamelCase : Optional[int] = True
def lowerCamelCase_ ( self : List[str],__A : Tuple=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
_lowerCamelCase : Optional[Any] = False
return x * self.a[0] + self.b[0]
class UpperCAmelCase__ ( torch.nn.Module ):
def __init__( self : Union[str, Any],__A : List[str]=0,__A : List[str]=0,__A : int=False ):
super().__init__()
_lowerCamelCase : Optional[int] = torch.nn.Parameter(torch.tensor(__A ).float() )
_lowerCamelCase : Dict = torch.nn.Parameter(torch.tensor(__A ).float() )
_lowerCamelCase : Tuple = True
def lowerCamelCase_ ( self : str,__A : List[Any]=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
_lowerCamelCase : Optional[Any] = False
return x * self.a + self.b
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int = 16 ):
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
_lowerCamelCase : Tuple = AutoTokenizer.from_pretrained("bert-base-cased" )
_lowerCamelCase : List[Any] = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
_lowerCamelCase : int = load_dataset("csv" , data_files=_lowerCAmelCase )
_lowerCamelCase : Dict = datasets["train"].unique("label" )
_lowerCamelCase : Optional[Any] = {v: i for i, v in enumerate(_lowerCAmelCase )}
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
_lowerCamelCase : Optional[int] = tokenizer(
examples["sentence1"] , examples["sentence2"] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" )
if "label" in examples:
_lowerCamelCase : str = [label_to_id[l] for l in examples["label"]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowerCamelCase : Optional[Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["sentence1", "sentence2", "label"] , )
def collate_fn(_lowerCAmelCase : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(_lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_lowerCamelCase : str = DataLoader(tokenized_datasets["train"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=2 )
_lowerCamelCase : Optional[int] = DataLoader(tokenized_datasets["validation"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader
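# --- Illustrative sketch (added; not part of the original helpers) ---
# The collate_fn above pads to a fixed length on TPU and to the longest
# sequence otherwise. The same trade-off on toy integer sequences:
def _pad_batch(batch, pad_id=0, fixed_length=None):
    target = fixed_length or max(len(seq) for seq in batch)
    return [seq + [pad_id] * (target - len(seq)) for seq in batch]


assert _pad_batch([[1, 2, 3], [4]]) == [[1, 2, 3], [4, 0, 0]]  # "longest"
assert _pad_batch([[1, 2, 3], [4]], fixed_length=5)[1] == [4, 0, 0, 0, 0]  # static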
| 44 | 0 |
"""simple docstring"""
def lowercase ( UpperCamelCase : int , UpperCamelCase : int ):
"""simple docstring"""
return x if y == 0 else greatest_common_divisor(_lowerCAmelCase , x % y )
def lowercase ( UpperCamelCase : int , UpperCamelCase : int ):
"""simple docstring"""
return (x * y) // greatest_common_divisor(_lowerCAmelCase , _lowerCAmelCase )
def lowercase ( UpperCamelCase : int = 20 ):
"""simple docstring"""
A__ : Tuple =1
for i in range(1 , n + 1 ):
A__ : Any =lcm(_lowerCAmelCase , _lowerCAmelCase )
return g
if __name__ == "__main__":
print(f"""{solution() = }""")
| 656 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = False, False, False
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = None
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = None
# Automatically constructed
lowerCAmelCase_ = "dict"
lowerCAmelCase_ = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
lowerCAmelCase_ = field(default='Audio' , init=A , repr=A )
def __call__( self : Tuple ):
return self.pa_type
def lowerCamelCase_ ( self : Any,__A : Union[str, bytes, dict] ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(__A,__A ):
return {"bytes": None, "path": value}
elif isinstance(__A,__A ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
_lowerCamelCase : List[Any] = BytesIO()
sf.write(__A,value["array"],value["sampling_rate"],format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
                    # Converting raw PCM bytes to WAV bytes requires knowing the sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
                    # If the PCM bytes are already available, use them directly instead of re-reading the file
_lowerCamelCase : Dict = np.frombuffer(value["bytes"],dtype=np.intaa ).astype(np.floataa ) / 3_2_7_6_7
else:
_lowerCamelCase : str = np.memmap(value["path"],dtype="h",mode="r" ).astype(np.floataa ) / 3_2_7_6_7
_lowerCamelCase : Optional[int] = BytesIO(bytes() )
sf.write(__A,__A,value["sampling_rate"],format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowerCamelCase_ ( self : Optional[Any],__A : dict,__A : Optional[Dict[str, Union[str, bool, None]]] = None ):
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
_lowerCamelCase , _lowerCamelCase : Optional[Any] = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
_lowerCamelCase : Tuple = xsplitext(__A )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
_lowerCamelCase : Tuple = token_per_repo_id or {}
_lowerCamelCase : Union[str, Any] = path.split("::" )[-1]
try:
_lowerCamelCase : str = string_to_dict(__A,config.HUB_DATASETS_URL )["repo_id"]
_lowerCamelCase : str = token_per_repo_id[repo_id]
except (ValueError, KeyError):
_lowerCamelCase : Any = None
with xopen(__A,"rb",use_auth_token=__A ) as f:
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = sf.read(__A )
else:
_lowerCamelCase , _lowerCamelCase : str = sf.read(__A )
_lowerCamelCase : List[str] = array.T
if self.mono:
_lowerCamelCase : List[str] = librosa.to_mono(__A )
if self.sampling_rate and self.sampling_rate != sampling_rate:
_lowerCamelCase : List[str] = librosa.resample(__A,orig_sr=__A,target_sr=self.sampling_rate )
_lowerCamelCase : Optional[Any] = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def lowerCamelCase_ ( self : Any ):
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def lowerCamelCase_ ( self : List[str],__A : Union[pa.StringArray, pa.StructArray] ):
if pa.types.is_string(storage.type ):
_lowerCamelCase : Any = pa.array([None] * len(__A ),type=pa.binary() )
_lowerCamelCase : int = pa.StructArray.from_arrays([bytes_array, storage],["bytes", "path"],mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_lowerCamelCase : Dict = pa.array([None] * len(__A ),type=pa.string() )
_lowerCamelCase : Any = pa.StructArray.from_arrays([storage, path_array],["bytes", "path"],mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
_lowerCamelCase : Tuple = pa.array([Audio().encode_example(__A ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
_lowerCamelCase : Tuple = storage.field("bytes" )
else:
_lowerCamelCase : Any = pa.array([None] * len(__A ),type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
_lowerCamelCase : List[str] = storage.field("path" )
else:
_lowerCamelCase : Tuple = pa.array([None] * len(__A ),type=pa.string() )
_lowerCamelCase : Tuple = pa.StructArray.from_arrays([bytes_array, path_array],["bytes", "path"],mask=storage.is_null() )
return array_cast(__A,self.pa_type )
def lowerCamelCase_ ( self : str,__A : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(__A : Dict ):
with xopen(__A,"rb" ) as f:
_lowerCamelCase : Any = f.read()
return bytes_
_lowerCamelCase : int = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
],type=pa.binary(),)
_lowerCamelCase : str = pa.array(
[os.path.basename(__A ) if path is not None else None for path in storage.field("path" ).to_pylist()],type=pa.string(),)
_lowerCamelCase : Dict = pa.StructArray.from_arrays([bytes_array, path_array],["bytes", "path"],mask=bytes_array.is_null() )
return array_cast(__A,self.pa_type )
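# --- Illustrative sketch (added; not part of the original feature code) ---
# `encode_example` above writes an in-memory array to WAV bytes via
# soundfile. The core conversion in isolation, assuming `soundfile` is
# installed:
def _array_to_wav_bytes(array, sampling_rate):
    import soundfile as sf

    buffer = BytesIO()
    sf.write(buffer, array, sampling_rate, format="wav")
    return buffer.getvalue()


# Usage: one second of silence at 16 kHz becomes a small WAV payload.
# wav_bytes = _array_to_wav_bytes(np.zeros(16_000, dtype=np.float32), 16_000)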
| 44 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : int=False) -> str:
'''simple docstring'''
if isinstance(_lowerCAmelCase , _lowerCAmelCase) and isinstance(_lowerCAmelCase , _lowerCAmelCase):
_lowercase : int = len(set_a.intersection(_lowerCAmelCase))
if alternative_union:
_lowercase : str = len(_lowerCAmelCase) + len(_lowerCAmelCase)
else:
_lowercase : List[Any] = len(set_a.union(_lowerCAmelCase))
return intersection / union
if isinstance(_lowerCAmelCase , (list, tuple)) and isinstance(_lowerCAmelCase , (list, tuple)):
_lowercase : Dict = [element for element in set_a if element in set_b]
if alternative_union:
_lowercase : int = len(_lowerCAmelCase) + len(_lowerCAmelCase)
return len(_lowerCAmelCase) / union
else:
_lowercase : List[Any] = set_a + [element for element in set_b if element not in set_a]
return len(_lowerCAmelCase) / len(_lowerCAmelCase)
return None
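# --- Illustrative sketch (added; not part of the original snippet) ---
# A compact set-based reference for the standard Jaccard similarity
# |A ∩ B| / |A ∪ B|, handy for sanity-checking the branches above:
def _jaccard_reference(a, b):
    set_a, set_b = set(a), set(b)
    if not set_a and not set_b:
        return 1.0  # convention: two empty collections are identical
    return len(set_a & set_b) / len(set_a | set_b)


assert _jaccard_reference({'a', 'b', 'c', 'd', 'e'}, {'c', 'd', 'e', 'f', 'h', 'i'}) == 3 / 8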
if __name__ == "__main__":
A = {'a', 'b', 'c', 'd', 'e'}
A = {'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b))
| 125 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'glpn'
def __init__( self : Tuple,__A : Optional[int]=3,__A : Optional[int]=4,__A : str=[2, 2, 2, 2],__A : Union[str, Any]=[8, 4, 2, 1],__A : Tuple=[3_2, 6_4, 1_6_0, 2_5_6],__A : int=[7, 3, 3, 3],__A : str=[4, 2, 2, 2],__A : int=[1, 2, 5, 8],__A : List[Any]=[4, 4, 4, 4],__A : Optional[int]="gelu",__A : int=0.0,__A : Tuple=0.0,__A : Tuple=0.02,__A : Optional[int]=0.1,__A : Optional[int]=1e-6,__A : Optional[int]=6_4,__A : Optional[Any]=1_0,__A : Tuple=-1,**__A : List[str],):
super().__init__(**__A )
_lowerCamelCase : Tuple = num_channels
_lowerCamelCase : Union[str, Any] = num_encoder_blocks
_lowerCamelCase : Dict = depths
_lowerCamelCase : List[Any] = sr_ratios
_lowerCamelCase : str = hidden_sizes
_lowerCamelCase : Any = patch_sizes
_lowerCamelCase : Any = strides
_lowerCamelCase : Dict = mlp_ratios
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : List[Any] = hidden_act
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : Union[str, Any] = drop_path_rate
_lowerCamelCase : str = layer_norm_eps
_lowerCamelCase : Tuple = decoder_hidden_size
_lowerCamelCase : int = max_depth
_lowerCamelCase : Dict = head_in_index
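# --- Illustrative sketch (added; not part of the original file) ---
# The class above mirrors `transformers.GLPNConfig`. A minimal usage sketch,
# assuming a `transformers` release that ships the GLPN model:
def _glpn_config_demo():
    from transformers import GLPNConfig

    config = GLPNConfig(num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2])
    # fields not passed fall back to the defaults defined in __init__
    return config.decoder_hidden_size  # 64 by default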
| 44 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : Any ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
UpperCamelCase_: Optional[Any] = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(__A )
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: List[Any] = "sshleifer/tiny-gpt2"
UpperCamelCase_: str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__A , multi_process=__A , )
UpperCamelCase_: Optional[Any] = TensorFlowBenchmark(__A )
UpperCamelCase_: Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Optional[int] = "sgugger/tiny-distilbert-classification"
UpperCamelCase_: List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , only_pretrain_model=__A , )
UpperCamelCase_: Dict = TensorFlowBenchmark(__A )
UpperCamelCase_: Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: List[str] = "sshleifer/tiny-gpt2"
UpperCamelCase_: Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , )
UpperCamelCase_: str = TensorFlowBenchmark(__A )
UpperCamelCase_: int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Any = "sshleifer/tiny-gpt2"
UpperCamelCase_: List[Any] = AutoConfig.from_pretrained(__A )
UpperCamelCase_: Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__A , multi_process=__A , )
UpperCamelCase_: Any = TensorFlowBenchmark(__A , [config] )
UpperCamelCase_: Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Any = "sshleifer/tiny-gpt2"
UpperCamelCase_: Tuple = AutoConfig.from_pretrained(__A )
UpperCamelCase_: Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , )
UpperCamelCase_: Optional[Any] = TensorFlowBenchmark(__A , [config] )
UpperCamelCase_: List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: int = "sshleifer/tiny-gpt2"
UpperCamelCase_: List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , )
UpperCamelCase_: int = TensorFlowBenchmark(__A )
UpperCamelCase_: Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Dict = "sshleifer/tiny-gpt2"
UpperCamelCase_: Dict = AutoConfig.from_pretrained(__A )
UpperCamelCase_: Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , )
UpperCamelCase_: Union[str, Any] = TensorFlowBenchmark(__A , [config] )
UpperCamelCase_: Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Optional[Any] = "patrickvonplaten/t5-tiny-random"
UpperCamelCase_: List[str] = AutoConfig.from_pretrained(__A )
UpperCamelCase_: Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , )
UpperCamelCase_: Dict = TensorFlowBenchmark(__A , configs=[config] )
UpperCamelCase_: List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Tuple = "sshleifer/tiny-gpt2"
UpperCamelCase_: List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__A , multi_process=__A , )
UpperCamelCase_: str = TensorFlowBenchmark(__A )
UpperCamelCase_: List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_: Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__A , save_to_csv=__A , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__A , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(__A , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(__A , """env.csv""" ) , multi_process=__A , )
UpperCamelCase_: Dict = TensorFlowBenchmark(__A )
benchmark.run()
self.assertTrue(Path(os.path.join(__A , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__A , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__A , """env.csv""" ) ).exists() )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Optional[int] = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(snake_case_ : List[Any] ):
self.assertTrue(hasattr(__A , """sequential""" ) )
self.assertTrue(hasattr(__A , """cumulative""" ) )
self.assertTrue(hasattr(__A , """current""" ) )
self.assertTrue(hasattr(__A , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_: List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__A , """log.txt""" ) , log_print=__A , trace_memory_line_by_line=__A , eager_mode=__A , multi_process=__A , )
UpperCamelCase_: Tuple = TensorFlowBenchmark(__A )
UpperCamelCase_: str = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__A , """log.txt""" ) ).exists() )
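# --- Illustrative sketch (added; not part of the original tests) ---
# The pattern the tests above exercise, distilled: configure the benchmark
# arguments, run, then inspect the result dicts. Assumes TensorFlow is
# available (so the imports at the top of this file ran) and the tiny
# checkpoint can be downloaded.
def _benchmark_demo():
    args = TensorFlowBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        inference=True,
        training=False,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    results = TensorFlowBenchmark(args).run()
    return results.time_inference_result, results.memory_inference_result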
| 548 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = ['input_features', 'attention_mask']
def __init__( self : Any,__A : List[Any]=8_0,__A : Dict=1_6_0_0_0,__A : Tuple=0.0,__A : Dict=1_0,__A : int=2_5,__A : Union[str, Any]="hamming_window",__A : List[str]=32768.0,__A : Union[str, Any]=0.97,__A : str=1.0,__A : Union[str, Any]=True,__A : Tuple=True,__A : Optional[Any]=False,**__A : Optional[Any],):
super().__init__(feature_size=__A,sampling_rate=__A,padding_value=__A,**__A )
_lowerCamelCase : Dict = feature_size
_lowerCamelCase : List[str] = sampling_rate
_lowerCamelCase : Any = padding_value
_lowerCamelCase : Dict = hop_length
_lowerCamelCase : Tuple = win_length
_lowerCamelCase : str = frame_signal_scale
_lowerCamelCase : List[str] = preemphasis_coeff
_lowerCamelCase : List[str] = mel_floor
_lowerCamelCase : str = normalize_means
_lowerCamelCase : Any = normalize_vars
_lowerCamelCase : List[str] = win_function
_lowerCamelCase : Tuple = return_attention_mask
_lowerCamelCase : List[Any] = win_length * sampling_rate // 1_0_0_0
_lowerCamelCase : List[Any] = hop_length * sampling_rate // 1_0_0_0
_lowerCamelCase : Any = optimal_fft_length(self.sample_size )
_lowerCamelCase : Dict = (self.n_fft // 2) + 1
def lowerCamelCase_ ( self : Any,__A : np.array ):
if self.win_function == "hamming_window":
_lowerCamelCase : Any = window_function(window_length=self.sample_size,name=self.win_function,periodic=__A )
else:
_lowerCamelCase : Optional[int] = window_function(window_length=self.sample_size,name=self.win_function )
_lowerCamelCase : int = mel_filter_bank(
num_frequency_bins=self.n_freqs,num_mel_filters=self.feature_size,min_frequency=0.0,max_frequency=self.sampling_rate / 2.0,sampling_rate=self.sampling_rate,)
_lowerCamelCase : List[str] = spectrogram(
one_waveform * self.frame_signal_scale,window=__A,frame_length=self.sample_size,hop_length=self.sample_stride,fft_length=self.n_fft,center=__A,preemphasis=self.preemphasis_coeff,mel_filters=__A,mel_floor=self.mel_floor,log_mel="log",)
return msfc_features.T
def lowerCamelCase_ ( self : Optional[int],__A : List[str],__A : Dict,__A : int ):
# make sure we normalize float32 arrays
if self.normalize_means:
_lowerCamelCase : Optional[Any] = x[:input_length].mean(axis=0 )
_lowerCamelCase : Optional[int] = np.subtract(__A,__A )
if self.normalize_vars:
_lowerCamelCase : int = x[:input_length].std(axis=0 )
_lowerCamelCase : Any = np.divide(__A,__A )
if input_length < x.shape[0]:
_lowerCamelCase : Tuple = padding_value
# make sure array is in float32
_lowerCamelCase : Optional[int] = x.astype(np.floataa )
return x
def lowerCamelCase_ ( self : Any,__A : List[np.ndarray],__A : Optional[np.ndarray] = None ):
_lowerCamelCase : Optional[int] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__A,__A,self.padding_value ) for x, n in zip(__A,__A )]
def __call__( self : Optional[Any],__A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],__A : Union[bool, str, PaddingStrategy] = False,__A : Optional[int] = None,__A : bool = False,__A : Optional[int] = None,__A : Optional[bool] = None,__A : Optional[Union[str, TensorType]] = None,__A : Optional[int] = None,**__A : Optional[Any],):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_lowerCamelCase : List[str] = isinstance(__A,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
_lowerCamelCase : List[str] = is_batched_numpy or (
isinstance(__A,(list, tuple) ) and (isinstance(raw_speech[0],(np.ndarray, tuple, list) ))
)
if is_batched:
_lowerCamelCase : List[Any] = [np.asarray(__A,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__A,np.ndarray ):
_lowerCamelCase : Dict = np.asarray(__A,dtype=np.floataa )
elif isinstance(__A,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowerCamelCase : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowerCamelCase : Tuple = [raw_speech]
# extract fbank features
_lowerCamelCase : str = [self._extract_mfsc_features(__A ) for one_waveform in raw_speech]
# convert into correct format for padding
_lowerCamelCase : Union[str, Any] = BatchFeature({"input_features": features} )
_lowerCamelCase : List[Any] = self.pad(
__A,padding=__A,max_length=__A,truncation=__A,pad_to_multiple_of=__A,return_attention_mask=__A,**__A,)
# make sure list is in array format
_lowerCamelCase : Optional[Any] = padded_inputs.get("input_features" )
if isinstance(input_features[0],__A ):
_lowerCamelCase : int = [np.asarray(__A,dtype=np.floataa ) for feature in input_features]
_lowerCamelCase : Dict = padded_inputs.get("attention_mask" )
if attention_mask is not None:
_lowerCamelCase : Dict = [np.asarray(__A,dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
_lowerCamelCase : Dict = (
np.array(__A,dtype=np.intaa )
if self._get_padding_strategies(__A,max_length=__A ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
_lowerCamelCase : Tuple = self.normalize(
padded_inputs["input_features"],attention_mask=__A )
if return_tensors is not None:
_lowerCamelCase : Dict = padded_inputs.convert_to_tensors(__A )
return padded_inputs
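# --- Illustrative sketch (added; not part of the original extractor) ---
# `_normalize_one` above applies per-utterance mean/variance normalization
# over the valid frames. The core of that operation on a toy feature array:
def _mean_var_normalize_demo():
    features = np.random.randn(100, 80).astype(np.float32)  # (frames, mel bins)
    normalized = (features - features.mean(axis=0)) / features.std(axis=0)
    # after normalization each mel bin is ~zero-mean, unit-variance
    assert np.allclose(normalized.mean(axis=0), 0.0, atol=1e-4)
    assert np.allclose(normalized.std(axis=0), 1.0, atol=1e-4)
    return normalized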
| 44 | 0 |
UpperCAmelCase_ : List[Any] = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
UpperCAmelCase_ : str = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
UpperCAmelCase_ : str = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
UpperCAmelCase_ : Optional[Any] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
UpperCAmelCase_ : int = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
UpperCAmelCase_ : Optional[int] = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
UpperCAmelCase_ : Optional[Any] = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
UpperCAmelCase_ : List[str] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
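# --- Illustrative sketch (added; not part of the original constants) ---
# Every schedule above is a strictly decreasing subset of [0, 999]. A quick
# validity check one might run over any of them:
def _is_valid_schedule(timesteps, num_train_timesteps=1000):
    decreasing = all(a > b for a, b in zip(timesteps, timesteps[1:]))
    in_range = all(0 <= t < num_train_timesteps for t in timesteps)
    return decreasing and in_range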
| 570 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
UpperCAmelCase_ : int = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = torch.load(_lowerCAmelCase , map_location="cpu" )
return sd
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple=rename_keys_prefix ):
"""simple docstring"""
_lowerCamelCase : Any = OrderedDict()
_lowerCamelCase : str = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_lowerCamelCase : Any = key
for name_pair in rename_keys_prefix:
_lowerCamelCase : Dict = new_key.replace(name_pair[0] , name_pair[1] )
_lowerCamelCase : Any = d[key]
if key == "bert.cls.predictions.decoder.weight":
            # The old BERT code didn't have `decoder.bias`; it was added separately
_lowerCamelCase : List[str] = new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Dict ):
"""simple docstring"""
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
# Get Config
if "pre" in checkpoint_path:
_lowerCamelCase : Optional[int] = "pretraining"
if "vcr" in checkpoint_path:
_lowerCamelCase : Union[str, Any] = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
_lowerCamelCase : List[str] = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
_lowerCamelCase : int = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
_lowerCamelCase : List[str] = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.' )
else:
if "vcr" in checkpoint_path:
_lowerCamelCase : Any = {"visual_embedding_dim": 512}
_lowerCamelCase : List[Any] = "multichoice"
elif "vqa_advanced" in checkpoint_path:
_lowerCamelCase : Tuple = {"visual_embedding_dim": 2048}
_lowerCamelCase : Dict = "vqa_advanced"
elif "vqa" in checkpoint_path:
_lowerCamelCase : Union[str, Any] = {"visual_embedding_dim": 2048, "num_labels": 3129}
_lowerCamelCase : Optional[int] = "vqa"
elif "nlvr" in checkpoint_path:
_lowerCamelCase : Tuple = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
_lowerCamelCase : Optional[Any] = "nlvr"
_lowerCamelCase : str = VisualBertConfig(**_lowerCAmelCase )
# Load State Dict
_lowerCamelCase : str = load_state_dict(_lowerCAmelCase )
_lowerCamelCase : List[str] = get_new_dict(_lowerCAmelCase , _lowerCAmelCase )
if model_type == "pretraining":
_lowerCamelCase : List[Any] = VisualBertForPreTraining(_lowerCAmelCase )
elif model_type == "vqa":
_lowerCamelCase : Dict = VisualBertForQuestionAnswering(_lowerCAmelCase )
elif model_type == "nlvr":
_lowerCamelCase : Tuple = VisualBertForVisualReasoning(_lowerCAmelCase )
elif model_type == "multichoice":
_lowerCamelCase : str = VisualBertForMultipleChoice(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
# Save Checkpoints
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
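# --- Illustrative sketch (added; not part of the original script) ---
# `get_new_dict` above renames checkpoint keys by applying (old, new)
# substring pairs in order. The same pattern on a toy state dict:
def _rename_demo():
    pairs = [("bert.bert", "visual_bert"), ("bert.cls", "cls")]
    old = OrderedDict({"bert.bert.encoder.weight": 1, "bert.cls.bias": 2})
    new = OrderedDict()
    for key, value in old.items():
        for src, dest in pairs:
            key = key.replace(src, dest)
        new[key] = value
    assert list(new) == ["visual_bert.encoder.weight", "cls.bias"]
    return new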
| 44 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE__ : List[Any] = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = ['ConditionalDetrFeatureExtractor']
SCREAMING_SNAKE_CASE__ : str = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
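# --- Illustrative sketch (added; not part of the original module) ---
# `_LazyModule` defers the heavy imports above until an attribute is first
# touched. The same idea can be expressed with a PEP 562 module-level
# __getattr__; the attribute map below is a made-up stand-in:
def _lazy_getattr(name, _lazy={"json": "json", "pickle": "pickle"}):
    import importlib

    if name in _lazy:
        return importlib.import_module(_lazy[name])
    raise AttributeError(f"no lazy attribute {name!r}")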
| 112 |
'''simple docstring'''
import functools
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : list[int] ):
"""simple docstring"""
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or not all(isinstance(_lowerCAmelCase , _lowerCAmelCase ) for day in days ):
raise ValueError("The parameter days should be a list of integers" )
if len(_lowerCAmelCase ) != 3 or not all(isinstance(_lowerCAmelCase , _lowerCAmelCase ) for cost in costs ):
raise ValueError("The parameter costs should be a list of three integers" )
if len(_lowerCAmelCase ) == 0:
return 0
if min(_lowerCAmelCase ) <= 0:
raise ValueError("All days elements should be greater than 0" )
if max(_lowerCAmelCase ) >= 366:
raise ValueError("All days elements should be less than 366" )
_lowerCamelCase : Union[str, Any] = set(_lowerCAmelCase )
@functools.cache
def dynamic_programming(_lowerCAmelCase : int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
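# --- Illustrative sketch (added; not part of the original snippet) ---
# A readable restatement of the memoized recurrence above (LeetCode 983,
# "Minimum Cost For Tickets"): on each travel day pick a 1-, 7-, or 30-day
# pass; on non-travel days just advance.
def _min_cost_tickets(days, costs):
    day_set = set(days)

    @functools.cache
    def best(day: int) -> int:
        if day > 365:
            return 0
        if day not in day_set:
            return best(day + 1)
        return min(
            costs[0] + best(day + 1),
            costs[1] + best(day + 7),
            costs[2] + best(day + 30),
        )

    return best(1)


assert _min_cost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11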
| 44 | 0 |
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> str:
UpperCamelCase__ : Dict = len(_lowerCAmelCase)
    # If row is equal to the size of the board, there is a queen in each row of
    # the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board])
return
# We iterate each column in the row to find all possible results in each row
for col in range(_lowerCAmelCase):
        # We apply what we learned previously. First we check that the column
        # value does not already appear in the current board (possible_board);
        # if it does, there is a vertical collision. Then we apply the two
        # diagonal formulas:
        #
        # 45º: y - x = b, i.e. row - col = b
        # 135º: y + x = b, i.e. row + col = b
        #
        # and verify that the results of these two formulas do not already
        # exist in their respective collision sets
        # (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is true there is a collision, so we continue
        # to the next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # Otherwise we call the dfs function again with the updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , _lowerCAmelCase , _lowerCAmelCase , )
def __UpperCAmelCase ( lowerCamelCase_) -> List[str]:
UpperCamelCase__ : list[list[str]] = []
depth_first_search([] , [] , [] , _lowerCAmelCase , _lowerCAmelCase)
# Print all the boards
for board in boards:
for column in board:
print(_lowerCAmelCase)
print('')
print(len(_lowerCAmelCase) , 'solutions were found.')
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
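# --- Illustrative sketch (added; not part of the original snippet) ---
# A quick numeric check of the diagonal invariants used above: queens at
# (r1, c1) and (r2, c2) share a diagonal exactly when r1 - c1 == r2 - c2
# (one direction) or r1 + c1 == r2 + c2 (the other).
def _same_diagonal(r1, c1, r2, c2):
    return r1 - c1 == r2 - c2 or r1 + c1 == r2 + c2


assert _same_diagonal(0, 0, 3, 3)  # main diagonal
assert _same_diagonal(0, 3, 3, 0)  # anti-diagonal
assert not _same_diagonal(0, 1, 2, 0)  # offsets differ, no shared diagonal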
| 596 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
_lowerCamelCase : Dict = MaskFormerConfig(backbone_config=_lowerCAmelCase )
_lowerCamelCase : Tuple = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
_lowerCamelCase : List[Any] = 847
_lowerCamelCase : str = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
_lowerCamelCase : Optional[int] = 150
_lowerCamelCase : Union[str, Any] = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
_lowerCamelCase : Union[str, Any] = 171
_lowerCamelCase : str = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
_lowerCamelCase : Optional[int] = 133
_lowerCamelCase : Any = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
_lowerCamelCase : str = 19
_lowerCamelCase : Tuple = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
_lowerCamelCase : List[Any] = 65
_lowerCamelCase : Optional[int] = "mapillary-vistas-id2label.json"
_lowerCamelCase : Any = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : Optional[int] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
return config
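# --- Illustrative sketch (added; not part of the original script) ---
# The config builder above loads an id2label mapping from the Hub and casts
# the JSON string keys back to ints. The key-casting step in isolation:
def _cast_label_keys(raw: dict) -> dict:
    return {int(k): v for k, v in raw.items()}


assert _cast_label_keys({"0": "wall", "1": "building"}) == {0: "wall", 1: "building"}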
def A_ ( _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Any = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Tuple = dct.pop(_lowerCAmelCase )
_lowerCamelCase : str = val
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_lowerCamelCase : int = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_lowerCamelCase : Union[str, Any] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
_lowerCamelCase : List[str] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Optional[int] = in_proj_weight[:dim, :]
_lowerCamelCase : Optional[int] = in_proj_bias[: dim]
_lowerCamelCase : List[str] = in_proj_weight[
dim : dim * 2, :
]
_lowerCamelCase : List[Any] = in_proj_bias[
dim : dim * 2
]
_lowerCamelCase : List[Any] = in_proj_weight[
-dim :, :
]
_lowerCamelCase : Union[str, Any] = in_proj_bias[-dim :]
# fmt: on
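# Hedged illustration (hypothetical helper, not used by the conversion itself): the
# slicing applied above turns a fused qkv projection of shape (3 * dim, dim) into
# separate query/key/value weights, stacked row-wise as [q; k; v].
def _split_fused_qkv(qkv_weight, qkv_bias, dim):
    """Return ((q_w, q_b), (k_w, k_b), (v_w, v_b)) from a fused qkv projection."""
    q = (qkv_weight[:dim, :], qkv_bias[:dim])
    k = (qkv_weight[dim : dim * 2, :], qkv_bias[dim : dim * 2])
    v = (qkv_weight[-dim:, :], qkv_bias[-dim:])
    return q, k, v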
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : int = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase : Tuple = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
_lowerCamelCase : Optional[int] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Optional[Any] = in_proj_weight[: hidden_size, :]
        _lowerCamelCase : Optional[int] = in_proj_bias[:hidden_size]
_lowerCamelCase : str = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase : Dict = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase : Any = in_proj_weight[-hidden_size :, :]
_lowerCamelCase : Any = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase : Optional[int] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
_lowerCamelCase : List[Any] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Tuple = in_proj_weight[: hidden_size, :]
        _lowerCamelCase : str = in_proj_bias[:hidden_size]
_lowerCamelCase : str = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase : Optional[int] = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase : int = in_proj_weight[-hidden_size :, :]
_lowerCamelCase : Optional[Any] = in_proj_bias[-hidden_size :]
# fmt: on
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : bool = False ):
"""simple docstring"""
_lowerCamelCase : Tuple = get_maskformer_config(_lowerCAmelCase )
# load original state_dict
with open(_lowerCAmelCase , "rb" ) as f:
_lowerCamelCase : List[Any] = pickle.load(_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_lowerCamelCase : List[Any] = create_rename_keys(_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_swin_q_k_v(_lowerCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
# update to torch tensors
for key, value in state_dict.items():
_lowerCamelCase : Dict = torch.from_numpy(_lowerCAmelCase )
# load 🤗 model
_lowerCamelCase : int = MaskFormerForInstanceSegmentation(_lowerCAmelCase )
model.eval()
for name, param in model.named_parameters():
print(_lowerCAmelCase , param.shape )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(_lowerCAmelCase ) == 0, F'Unexpected keys: {unexpected_keys}'
# verify results
_lowerCamelCase : Any = prepare_img()
if "vistas" in model_name:
_lowerCamelCase : Any = 65
elif "cityscapes" in model_name:
_lowerCamelCase : Optional[Any] = 65535
else:
_lowerCamelCase : str = 255
    _lowerCamelCase : List[str] = "ade" in model_name
_lowerCamelCase : Union[str, Any] = MaskFormerImageProcessor(ignore_index=_lowerCAmelCase , reduce_labels=_lowerCAmelCase )
_lowerCamelCase : int = image_processor(_lowerCAmelCase , return_tensors="pt" )
_lowerCamelCase : Tuple = model(**_lowerCAmelCase )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_lowerCamelCase : Tuple = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(F'nielsr/{model_name}' )
image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
        help='Name of the MaskFormer model you\'d like to convert',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
        help='Path to the original pickled state dict (.pkl file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase_ : int = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """
    For every vector in `value_array`, find the nearest vector in `dataset`
    and return [nearest_vector, distance] pairs.
    """
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity between two vectors: dot(a, b) / (|a| * |b|)."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
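# Hedged self-check (hypothetical helper, not in the original module): the cosine
# formula above on hand-picked vectors; cos(45 degrees) = sqrt(2) / 2.
def _cosine_similarity_demo() -> float:
    """Expected value: ~0.7071 for a = [1, 0] and b = [1, 1]."""
    a = np.array([1.0, 0.0])
    b = np.array([1.0, 1.0])
    return float(np.dot(a, b) / (norm(a) * norm(b)))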
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = range(2, 20 + 1)
UpperCAmelCase_ : str = [10**k for k in range(ks[-1] + 1)]
UpperCAmelCase_ : dict[int, dict[int, list[list[int]]]] = {}
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = sum(a_i[j] for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ) )
_lowerCamelCase : List[str] = sum(a_i[j] * base[j] for j in range(min(len(_lowerCAmelCase ) , _lowerCAmelCase ) ) )
_lowerCamelCase , _lowerCamelCase : int = 0, 0
_lowerCamelCase : Dict = n - i
_lowerCamelCase : int = memo.get(_lowerCAmelCase )
if sub_memo is not None:
_lowerCamelCase : List[str] = sub_memo.get(_lowerCAmelCase )
if jumps is not None and len(_lowerCAmelCase ) > 0:
# find and make the largest jump without going over
_lowerCamelCase : List[Any] = -1
for _k in range(len(_lowerCAmelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_lowerCamelCase : Any = _k
break
if max_jump >= 0:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = jumps[max_jump]
# since the difference between jumps is cached, add c
_lowerCamelCase : str = diff + c
for j in range(min(_lowerCAmelCase , len(_lowerCAmelCase ) ) ):
_lowerCamelCase , _lowerCamelCase : List[Any] = divmod(_lowerCAmelCase , 10 )
if new_c > 0:
add(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_lowerCamelCase : int = []
else:
_lowerCamelCase : Tuple = {c: []}
_lowerCamelCase : Any = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_lowerCamelCase , _lowerCamelCase : Optional[int] = next_term(_lowerCAmelCase , k - 1 , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_lowerCamelCase , _lowerCamelCase : List[str] = compute(_lowerCAmelCase , _lowerCAmelCase , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
_lowerCamelCase : List[str] = sub_memo[c]
# keep jumps sorted by # of terms skipped
_lowerCamelCase : int = 0
while j < len(_lowerCAmelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(_lowerCAmelCase , (diff, dn, k) )
return (diff, dn)
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
if i >= n:
return 0, i
if k > len(_lowerCAmelCase ):
a_i.extend([0 for _ in range(k - len(_lowerCAmelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_lowerCamelCase : List[str] = i
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = 0, 0, 0
for j in range(len(_lowerCAmelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_lowerCamelCase : int = ds_c + ds_b
diff += addend
_lowerCamelCase : List[str] = 0
for j in range(_lowerCAmelCase ):
_lowerCamelCase : List[Any] = a_i[j] + addend
_lowerCamelCase , _lowerCamelCase : Any = divmod(_lowerCAmelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return diff, i - start_i
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ):
_lowerCamelCase : Tuple = digits[j] + addend
if s >= 10:
_lowerCamelCase , _lowerCamelCase : Optional[int] = divmod(_lowerCAmelCase , 10 )
_lowerCamelCase : Any = addend // 10 + quotient
else:
_lowerCamelCase : Tuple = s
_lowerCamelCase : List[Any] = addend // 10
if addend == 0:
break
while addend > 0:
_lowerCamelCase , _lowerCamelCase : str = divmod(_lowerCAmelCase , 10 )
digits.append(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : int = 10**15 ):
"""simple docstring"""
_lowerCamelCase : Tuple = [1]
_lowerCamelCase : List[Any] = 1
_lowerCamelCase : List[str] = 0
while True:
_lowerCamelCase , _lowerCamelCase : Dict = next_term(_lowerCAmelCase , 20 , i + dn , _lowerCAmelCase )
dn += terms_jumped
if dn == n - i:
break
_lowerCamelCase : Optional[Any] = 0
for j in range(len(_lowerCAmelCase ) ):
a_n += digits[j] * 10**j
return a_n
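# Hedged cross-check (hypothetical helper, exponentially slower than the memoized-jump
# version above): the underlying recurrence is a(k+1) = a(k) + digitsum(a(k)) with
# a(0) = 1, so a brute-force implementation can validate small n.
def _naive_term(n: int) -> int:
    """a(n) computed term by term; only practical for small n."""
    a = 1
    for _ in range(n):
        a += sum(int(d) for d in str(a))
    return a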
if __name__ == "__main__":
print(f'''{solution() = }''')
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( __snake_case ,unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = BlenderbotSmallTokenizer
__lowerCAmelCase = False
def snake_case__ ( self ):
super().setUp()
__lowercase = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
__lowercase = dict(zip(__A , range(len(__A ) ) ) )
__lowercase = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
__lowercase = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
def snake_case__ ( self , **lowerCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__A )
def snake_case__ ( self , lowerCAmelCase_ ):
__lowercase = "adapt act apte"
__lowercase = "adapt act apte"
return input_text, output_text
def snake_case__ ( self ):
__lowercase = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowercase = "adapt act apte"
__lowercase = ["adapt", "act", "ap@@", "te"]
__lowercase = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
__lowercase = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
__lowercase = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
def snake_case__ ( self ):
__lowercase = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
assert tok("sam" ).input_ids == [1384]
__lowercase = "I am a small frog."
__lowercase = tok([src_text] , padding=__A , truncation=__A )["input_ids"]
__lowercase = tok.batch_decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def snake_case__ ( self ):
__lowercase = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
__lowercase = "I am a small frog ."
__lowercase = "."
__lowercase = tok(__A )["input_ids"]
__lowercase = tok(__A )["input_ids"]
assert encoded[-1] == encoded_dot[0]
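# Hedged illustration (hypothetical helper, independent of the test class above): the
# toy merges file written in setUp drives standard BPE, which repeatedly replaces the
# highest-priority adjacent symbol pair; one such merge step looks like this.
def _apply_bpe_merge(symbols: list, pair: tuple) -> list:
    """Replace adjacent occurrences of `pair` in `symbols` with the merged symbol."""
    merged, i = [], 0
    while i < len(symbols):
        if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == pair:
            merged.append(symbols[i] + symbols[i + 1])
            i += 2
        else:
            merged.append(symbols[i])
            i += 1
    return merged


# e.g. _apply_bpe_merge(["a", "p", "t</w>"], ("a", "p")) == ["ap", "t</w>"]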
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
UpperCAmelCase_ : Any = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    lowerCAmelCase_ = field(default=A , metadata={'help': 'Whether to freeze the encoder.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
lowerCAmelCase_ = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
lowerCAmelCase_ = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
lowerCAmelCase_ = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Source language id for translation.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Target language id for translation.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': '# num_beams to use for evaluation.'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any ):
"""simple docstring"""
logger.info(F'***** {split} metrics *****' )
for key in sorted(metrics.keys() ):
logger.info(F' {key} = {metrics[key]}' )
save_json(_lowerCAmelCase , os.path.join(_lowerCAmelCase , F'{split}_results.json' ) )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = parser.parse_args_into_dataclasses()
check_output_dir(_lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCamelCase : Tuple = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
assert hasattr(_lowerCAmelCase , _lowerCAmelCase ), F'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
setattr(_lowerCAmelCase , _lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCamelCase : int = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_lowerCAmelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_lowerCamelCase : List[Any] = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase : Any = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_lowerCamelCase : int = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_lowerCAmelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_lowerCamelCase : int = SeqaSeqDataset
# Get datasets
_lowerCamelCase : Tuple = (
dataset_class(
_lowerCAmelCase , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
_lowerCamelCase : List[Any] = (
dataset_class(
_lowerCAmelCase , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_lowerCamelCase : Optional[int] = (
dataset_class(
_lowerCAmelCase , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_lowerCamelCase : int = (
build_compute_metrics_fn(data_args.task , _lowerCAmelCase ) if training_args.predict_with_generate else None
)
_lowerCamelCase : List[Any] = SeqaSeqTrainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , data_args=_lowerCAmelCase , train_dataset=_lowerCAmelCase , eval_dataset=_lowerCAmelCase , data_collator=SeqaSeqDataCollator(
_lowerCAmelCase , _lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_lowerCAmelCase , tokenizer=_lowerCAmelCase , )
_lowerCamelCase : Optional[Any] = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
_lowerCamelCase : Optional[Any] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_lowerCamelCase : int = train_result.metrics
_lowerCamelCase : Optional[int] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_lowerCamelCase : Optional[Any] = trainer.evaluate(metric_key_prefix="val" )
_lowerCamelCase : Dict = data_args.n_val
_lowerCamelCase : List[Any] = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
if training_args.do_predict:
logger.info("*** Predict ***" )
_lowerCamelCase : Any = trainer.predict(test_dataset=_lowerCAmelCase , metric_key_prefix="test" )
_lowerCamelCase : Dict = test_output.metrics
_lowerCamelCase : Optional[int] = data_args.n_test
if trainer.is_world_process_zero():
_lowerCamelCase : int = round(metrics["test_loss"] , 4 )
handle_metrics("test" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
if training_args.predict_with_generate:
_lowerCamelCase : List[str] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
_lowerCamelCase : Any = lmap(str.strip , _lowerCAmelCase )
write_txt_file(_lowerCAmelCase , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(_lowerCAmelCase , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
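# Hedged usage sketch (the script filename, paths and model name below are placeholders,
# not values taken from this repo): the entry point above is normally driven from the
# command line with HfArgumentParser flags, e.g.
#
#   python finetune_trainer.py \
#       --model_name_or_path <pretrained-model> \
#       --data_dir <dataset-dir> \
#       --output_dir <output-dir> \
#       --do_train --do_eval --predict_with_generate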
from __future__ import annotations
_snake_case = 'Muhammad Umer Farooq'
_snake_case = 'MIT'
_snake_case = '1.0.0'
_snake_case = 'Muhammad Umer Farooq'
_snake_case = '[email protected]'
_snake_case = 'Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    """Collects the absolute URLs found in anchor tags on a page."""

    def __init__(self, domain: str):
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]):
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    """Get the main domain name (example.com)."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Get the sub-domain name (sub.example.com)."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Return a sorted list of e-mail addresses found on the page at `url`."""
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(url)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
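# Hedged illustration (hypothetical helper, separate from the crawler above): the same
# regex strategy used in emails_from_url, applied to an in-memory string.
def _emails_in_text(text: str, domain: str) -> list[str]:
    """Return addresses of the form <alnum>@<domain> found in `text`."""
    return re.findall("[a-zA-Z0-9]+@" + domain, text)


# Example: _emails_in_text("mail me at user1@example.com", "example.com")
# -> ["user1@example.com"]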
if __name__ == "__main__":
_snake_case = emails_from_url('''https://github.com''')
print(F"{len(emails)} emails found:")
print('''\n'''.join(sorted(emails)))
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase__ :
def __init__( self : List[Any],__A : str,__A : List[str]=1_3,__A : str=3_2,__A : Tuple=2,__A : Any=3,__A : Dict=1_6,__A : Dict=[3_2, 6_4, 1_2_8],__A : List[str]=[1, 2, 1],__A : str=[2, 2, 4],__A : Optional[int]=2,__A : Dict=2.0,__A : str=True,__A : Tuple=0.0,__A : int=0.0,__A : List[str]=0.1,__A : Any="gelu",__A : List[Any]=False,__A : Optional[Any]=True,__A : List[str]=0.02,__A : Tuple=1e-5,__A : Any=True,__A : Tuple=None,__A : Tuple=True,__A : Tuple=1_0,__A : List[Any]=8,__A : Optional[int]=["stage1", "stage2"],__A : int=[1, 2],):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Optional[int] = image_size
_lowerCamelCase : int = patch_size
_lowerCamelCase : Optional[Any] = num_channels
_lowerCamelCase : int = embed_dim
_lowerCamelCase : int = hidden_sizes
_lowerCamelCase : List[Any] = depths
_lowerCamelCase : Any = num_heads
_lowerCamelCase : List[str] = window_size
_lowerCamelCase : str = mlp_ratio
_lowerCamelCase : Any = qkv_bias
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : List[str] = drop_path_rate
_lowerCamelCase : str = hidden_act
_lowerCamelCase : Union[str, Any] = use_absolute_embeddings
_lowerCamelCase : List[Any] = patch_norm
_lowerCamelCase : Tuple = layer_norm_eps
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : Tuple = scope
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : int = type_sequence_label_size
_lowerCamelCase : Tuple = encoder_stride
_lowerCamelCase : Any = out_features
_lowerCamelCase : Any = out_indices
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Union[str, Any] ):
return FocalNetConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,embed_dim=self.embed_dim,hidden_sizes=self.hidden_sizes,depths=self.depths,num_heads=self.num_heads,window_size=self.window_size,mlp_ratio=self.mlp_ratio,qkv_bias=self.qkv_bias,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,drop_path_rate=self.drop_path_rate,hidden_act=self.hidden_act,use_absolute_embeddings=self.use_absolute_embeddings,path_norm=self.patch_norm,layer_norm_eps=self.layer_norm_eps,initializer_range=self.initializer_range,encoder_stride=self.encoder_stride,out_features=self.out_features,out_indices=self.out_indices,)
def lowerCamelCase_ ( self : int,__A : Union[str, Any],__A : Tuple,__A : List[Any] ):
_lowerCamelCase : Optional[Any] = FocalNetModel(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[Any] = model(__A )
_lowerCamelCase : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCamelCase : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, expected_seq_len, expected_dim) )
def lowerCamelCase_ ( self : int,__A : Optional[int],__A : int,__A : Optional[int] ):
_lowerCamelCase : Any = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ),len(config.out_features ) )
self.parent.assertListEqual(model.channels,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCamelCase : List[str] = None
_lowerCamelCase : List[str] = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ),1 )
self.parent.assertListEqual(model.channels,[config.hidden_sizes[-1]] )
def lowerCamelCase_ ( self : Optional[int],__A : Optional[int],__A : Dict,__A : Dict ):
_lowerCamelCase : List[Any] = FocalNetForMaskedImageModeling(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
self.parent.assertEqual(
result.reconstruction.shape,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCamelCase : Dict = 1
_lowerCamelCase : Any = FocalNetForMaskedImageModeling(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Optional[int] = model(__A )
self.parent.assertEqual(result.reconstruction.shape,(self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase_ ( self : List[Any],__A : Union[str, Any],__A : List[Any],__A : Optional[Any] ):
_lowerCamelCase : Union[str, Any] = self.type_sequence_label_size
_lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[int] = model(__A,labels=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : str = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = model(__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : int = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = FocalNetModelTester(self )
_lowerCamelCase : int = ConfigTester(self,config_class=__A,embed_dim=3_7,has_text_modality=__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : List[str] ):
return
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def lowerCamelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def lowerCamelCase_ ( self : List[str] ):
pass
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : str = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(),(nn.Module) )
_lowerCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A,nn.Linear ) )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : Union[str, Any] = model_class(__A )
_lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : int = [*signature.parameters.keys()]
_lowerCamelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1],__A )
def lowerCamelCase_ ( self : Tuple,__A : Any,__A : List[Any],__A : str,__A : Any ):
_lowerCamelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**self._prepare_for_class(__A,__A ) )
_lowerCamelCase : Optional[int] = outputs.hidden_states
_lowerCamelCase : int = getattr(
self.model_tester,"expected_num_hidden_layers",len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__A ),__A )
# FocalNet has a different seq_length
_lowerCamelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
_lowerCamelCase : Any = outputs.reshaped_hidden_states
self.assertEqual(len(__A ),__A )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = reshaped_hidden_states[0].shape
_lowerCamelCase : List[str] = (
reshaped_hidden_states[0].view(__A,__A,height * width ).permute(0,2,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Tuple = 3
_lowerCamelCase : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCamelCase : int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Optional[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
@slow
def lowerCamelCase_ ( self : Tuple ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Dict = FocalNetModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = _config_zero_init(__A )
for model_class in self.all_model_classes:
_lowerCamelCase : Any = model_class(config=__A )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),[0.0, 1.0],msg=f'Parameter {name} of model {model_class} seems not properly initialized',)
@require_vision
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Union[str, Any] ):
# TODO update organization
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(__A )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_lowerCamelCase : Dict = image_processor(images=__A,return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__A )
# verify the logits
_lowerCamelCase : List[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape,__A )
_lowerCamelCase : List[str] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__A,atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item(),2_8_1 )
@require_torch
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase_ = FocalNetConfig
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = FocalNetModelTester(self )
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : List[Any] = None
lowerCamelCase : List[str] = False
lowerCamelCase : Dict = False
lowerCamelCase : int = False
lowerCamelCase : Tuple = None
lowerCamelCase : Tuple = None
lowerCamelCase : str = False
lowerCamelCase : Dict = False
lowerCamelCase : Optional[Any] = False
lowerCamelCase : str = True
lowerCamelCase : List[Any] = None
lowerCamelCase : int = 1
lowerCamelCase : str = None
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Any = None
lowerCamelCase : Any = None
def lowercase_ ( self ) -> str:
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
'''simple docstring'''
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    """Build a list of Things from parallel name/value/weight lists."""
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    """Greedily pick items ordered by key_func while total weight fits max_cost."""
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
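# Hedged usage sketch (illustrative menu data, not from the original module): greedily
# fill a 50-unit budget, ranking items by plain value via Things.get_value.
def _demo_greedy():
    """Expected: both items fit (total weight 50), total value 140.0."""
    foods = build_menu(["burger", "salad"], [80, 60], [40, 10])
    return greedy(foods, 50.0, Things.get_value)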
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class _SCREAMING_SNAKE_CASE( _SCREAMING_SNAKE_CASE ):
A_ : str = 'MCTCTFeatureExtractor'
A_ : Tuple = 'AutoTokenizer'
def __init__( self : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : Tuple ) -> str:
super().__init__(__A , __A )
SCREAMING_SNAKE_CASE__ :Any = self.feature_extractor
SCREAMING_SNAKE_CASE__ :Dict = False
def __call__( self : Optional[Any] , *UpperCamelCase_ : int , **UpperCamelCase_ : Union[str, Any] ) -> Any:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__A , **__A )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
SCREAMING_SNAKE_CASE__ :int = kwargs.pop('raw_speech' )
else:
SCREAMING_SNAKE_CASE__ :int = kwargs.pop('audio' , __A )
SCREAMING_SNAKE_CASE__ :Optional[Any] = kwargs.pop('sampling_rate' , __A )
SCREAMING_SNAKE_CASE__ :Tuple = kwargs.pop('text' , __A )
if len(__A ) > 0:
SCREAMING_SNAKE_CASE__ :List[Any] = args[0]
SCREAMING_SNAKE_CASE__ :int = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
SCREAMING_SNAKE_CASE__ :Tuple = self.feature_extractor(__A , *__A , sampling_rate=__A , **__A )
if text is not None:
SCREAMING_SNAKE_CASE__ :Optional[Any] = self.tokenizer(__A , **__A )
if text is None:
return inputs
elif audio is None:
return encodings
else:
SCREAMING_SNAKE_CASE__ :str = encodings["input_ids"]
return inputs
def __lowerCamelCase ( self : Optional[Any] , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : List[str] ) -> Optional[Any]:
return self.tokenizer.batch_decode(*__A , **__A )
def __lowerCamelCase ( self : Union[str, Any] , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Optional[Any] ) -> Union[str, Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*__A , **__A )
SCREAMING_SNAKE_CASE__ :Optional[int] = kwargs.pop('input_features' , __A )
SCREAMING_SNAKE_CASE__ :int = kwargs.pop('labels' , __A )
if len(__A ) > 0:
SCREAMING_SNAKE_CASE__ :str = args[0]
SCREAMING_SNAKE_CASE__ :str = args[1:]
if input_features is not None:
SCREAMING_SNAKE_CASE__ :Tuple = self.feature_extractor.pad(__A , *__A , **__A )
if labels is not None:
SCREAMING_SNAKE_CASE__ :Optional[int] = self.tokenizer.pad(__A , **__A )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
SCREAMING_SNAKE_CASE__ :List[Any] = labels["input_ids"]
return input_features
def __lowerCamelCase ( self : int , *UpperCamelCase_ : Dict , **UpperCamelCase_ : List[str] ) -> Optional[Any]:
return self.tokenizer.decode(*__A , **__A )
@contextmanager
def __lowerCamelCase ( self : List[Any] ) -> Optional[int]:
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = True
SCREAMING_SNAKE_CASE__ :Union[str, Any] = self.tokenizer
yield
SCREAMING_SNAKE_CASE__ :List[str] = self.feature_extractor
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
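# Hedged usage sketch (assuming the context manager above is exposed as
# `as_target_processor`, as in the upstream API; variable names are placeholders):
#
#   with processor.as_target_processor():
#       labels = processor(text=transcripts).input_ids   # routed to the tokenizer
#
# Inside the `with` block, __call__ and pad() above dispatch to self.current_processor
# (the tokenizer); afterwards the feature extractor is restored.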
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ : List[Any] = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = ['ConditionalDetrFeatureExtractor']
UpperCAmelCase_ : str = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
__A : dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1055.0_5585,
"footpound": 1.35_5818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
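# Added sanity checks for energy_conversion(); the values follow directly from
# the ENERGY_CONVERSION table above (1 kJ = 1_000 J, 1 kWh = 3_600_000 J).
assert energy_conversion("joule", "kilojoule", 1_000) == 1.0
assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0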
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
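# Added end-user sketch of the converters path exercised above: datasets
# forwards pandas-style read_csv kwargs such as `converters` through the csv
# builder ("data.csv" is a hypothetical file with an "int_list" column):
#
#     from datasets import load_dataset
#     ds = load_dataset(
#         "csv",
#         data_files="data.csv",
#         converters={"int_list": lambda x: [int(i) for i in x.split()]},
#     )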
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
'''simple docstring'''
def __init__( self : List[Any] ,UpperCamelCase : str ,UpperCamelCase : List[str]=13 ,UpperCamelCase : str=32 ,UpperCamelCase : Tuple=2 ,UpperCamelCase : Any=3 ,UpperCamelCase : Dict=16 ,UpperCamelCase : Dict=[32, 64, 128] ,UpperCamelCase : List[str]=[1, 2, 1] ,UpperCamelCase : str=[2, 2, 4] ,UpperCamelCase : Optional[int]=2 ,UpperCamelCase : Dict=2.0 ,UpperCamelCase : str=True ,UpperCamelCase : Tuple=0.0 ,UpperCamelCase : int=0.0 ,UpperCamelCase : List[str]=0.1 ,UpperCamelCase : Any="gelu" ,UpperCamelCase : List[Any]=False ,UpperCamelCase : Optional[Any]=True ,UpperCamelCase : List[str]=0.0_2 ,UpperCamelCase : Tuple=1e-5 ,UpperCamelCase : Any=True ,UpperCamelCase : Tuple=None ,UpperCamelCase : Tuple=True ,UpperCamelCase : Tuple=10 ,UpperCamelCase : List[Any]=8 ,UpperCamelCase : Optional[int]=["stage1", "stage2"] ,UpperCamelCase : int=[1, 2] ,) -> Optional[int]:
_lowercase : List[Any] = parent
_lowercase : Optional[Any] = batch_size
_lowercase : Optional[int] = image_size
_lowercase : int = patch_size
_lowercase : Optional[Any] = num_channels
_lowercase : int = embed_dim
_lowercase : int = hidden_sizes
_lowercase : List[Any] = depths
_lowercase : Any = num_heads
_lowercase : List[str] = window_size
_lowercase : str = mlp_ratio
_lowercase : Any = qkv_bias
_lowercase : str = hidden_dropout_prob
_lowercase : str = attention_probs_dropout_prob
_lowercase : List[str] = drop_path_rate
_lowercase : str = hidden_act
_lowercase : Union[str, Any] = use_absolute_embeddings
_lowercase : List[Any] = patch_norm
_lowercase : Tuple = layer_norm_eps
_lowercase : str = initializer_range
_lowercase : Optional[int] = is_training
_lowercase : Tuple = scope
_lowercase : List[Any] = use_labels
_lowercase : int = type_sequence_label_size
_lowercase : Tuple = encoder_stride
_lowercase : Any = out_features
_lowercase : Any = out_indices
def _lowerCamelCase ( self : Any ) -> List[str]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
return FocalNetConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
def _lowerCamelCase ( self : int ,UpperCamelCase : Union[str, Any] ,UpperCamelCase : Tuple ,UpperCamelCase : List[Any] ) -> int:
_lowercase : Optional[Any] = FocalNetModel(config=__A )
model.to(__A )
model.eval()
_lowercase : Optional[Any] = model(__A )
_lowercase : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowercase : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def _lowerCamelCase ( self : int ,UpperCamelCase : Optional[int] ,UpperCamelCase : int ,UpperCamelCase : Optional[int] ) -> Dict:
_lowercase : Any = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowercase : List[str] = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowercase : List[str] = None
_lowercase : List[str] = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowercase : str = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def _lowerCamelCase ( self : Optional[int] ,UpperCamelCase : Optional[int] ,UpperCamelCase : Dict ,UpperCamelCase : Dict ) -> Any:
_lowercase : List[Any] = FocalNetForMaskedImageModeling(config=__A )
model.to(__A )
model.eval()
_lowercase : List[str] = model(__A )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowercase : Dict = 1
_lowercase : Any = FocalNetForMaskedImageModeling(__A )
model.to(__A )
model.eval()
_lowercase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowercase : Optional[int] = model(__A )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def _lowerCamelCase ( self : List[Any] ,UpperCamelCase : Union[str, Any] ,UpperCamelCase : List[Any] ,UpperCamelCase : Optional[Any] ) -> List[Any]:
_lowercase : Union[str, Any] = self.type_sequence_label_size
_lowercase : Optional[Any] = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowercase : Optional[int] = model(__A ,labels=__A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowercase : str = 1
_lowercase : str = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowercase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowercase : List[Any] = model(__A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self : Optional[int] ) -> Dict:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def _lowerCamelCase ( self : int ) -> Optional[Any]:
_lowercase : Optional[int] = FocalNetModelTester(self )
_lowercase : int = ConfigTester(self ,config_class=__A ,embed_dim=37 ,has_text_modality=__A )
def _lowerCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self : List[str] ) -> Any:
return
def _lowerCamelCase ( self : Any ) -> Optional[Any]:
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _lowerCamelCase ( self : int ) -> Optional[Any]:
_lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__A )
def _lowerCamelCase ( self : Union[str, Any] ) -> Optional[int]:
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__A )
def _lowerCamelCase ( self : int ) -> List[Any]:
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def _lowerCamelCase ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def _lowerCamelCase ( self : List[str] ) -> Optional[Any]:
pass
def _lowerCamelCase ( self : List[str] ) -> Any:
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowercase : str = model_class(__A )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_lowercase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A ,nn.Linear ) )
def _lowerCamelCase ( self : List[Any] ) -> Tuple:
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowercase : Union[str, Any] = model_class(__A )
_lowercase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : int = [*signature.parameters.keys()]
_lowercase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__A )
def _lowerCamelCase ( self : Tuple ,UpperCamelCase : Any ,UpperCamelCase : List[Any] ,UpperCamelCase : str ,UpperCamelCase : Any ) -> Optional[int]:
_lowercase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowercase : Optional[int] = model(**self._prepare_for_class(__A ,__A ) )
_lowercase : Optional[int] = outputs.hidden_states
_lowercase : int = getattr(
self.model_tester ,'expected_num_hidden_layers' ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__A ) ,__A )
# FocalNet has a different seq_length
_lowercase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowercase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
_lowercase : Any = outputs.reshaped_hidden_states
self.assertEqual(len(__A ) ,__A )
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def _lowerCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowercase : List[Any] = True
self.check_hidden_states_output(__A ,__A ,__A ,__A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase : List[Any] = True
self.check_hidden_states_output(__A ,__A ,__A ,__A )
def _lowerCamelCase ( self : Optional[Any] ) -> Tuple:
_lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : Tuple = 3
_lowercase : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowercase : Tuple = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowercase : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowercase : int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowercase : List[Any] = True
self.check_hidden_states_output(__A ,__A ,__A ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase : Optional[Any] = True
self.check_hidden_states_output(__A ,__A ,__A ,(padded_height, padded_width) )
@slow
def _lowerCamelCase ( self : Tuple ) -> Optional[int]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Dict = FocalNetModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def _lowerCamelCase ( self : Tuple ) -> Union[str, Any]:
_lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : Optional[Any] = _config_zero_init(__A )
for model_class in self.all_model_classes:
_lowercase : Any = model_class(config=__A )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def _lowerCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self : Union[str, Any] ) -> str:
_lowercase : Any = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(__A )
_lowercase : int = self.default_image_processor
_lowercase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowercase : Dict = image_processor(images=__A ,return_tensors='pt' ).to(__A )
# forward pass
with torch.no_grad():
_lowercase : Dict = model(**__A )
# verify the logits
_lowercase : List[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,__A )
_lowercase : List[str] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__A ,atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
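# Added usage sketch for the task template above (the datasets task API is
# deprecated in recent releases, so treat this as illustrative only):
template = LanguageModeling(text_column="content")
assert template.column_mapping == {"content": "text"}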
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
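# Added note: the perplexity reported above is exp(mean token-level cross-entropy).
# A self-contained illustration of the same reduction:
#
#     import torch
#     losses = [torch.tensor([2.1, 1.9]), torch.tensor([2.0, 2.0])]  # gathered per-batch losses
#     mean_loss = torch.mean(torch.cat(losses))   # tensor(2.)
#     perplexity = torch.exp(mean_loss)           # tensor(7.3891)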
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : List[str] = {
'allenai/led-base-16384': 1_6384,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = LEDTokenizer
lowerCAmelCase_ = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)
@mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
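# Added usage sketch for the global-attention handling in _pad() above; the
# checkpoint name is real, the input strings are placeholders. Entries are
# padded with -1 because LED treats -1/0 as local attention and 1 as global:
#
#     tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#     enc = tokenizer(["short text", "a somewhat longer piece of text"])
#     enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
#     batch = tokenizer.pad(enc, padding=True)  # global_attention_mask padded with -1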
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.01",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
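# Added usage sketch: how notebook_launcher() is typically driven from a
# notebook cell; the training function builds its own Accelerator inside the
# spawned processes.
#
#     from accelerate import Accelerator
#
#     def training_function():
#         accelerator = Accelerator()
#         accelerator.print(f"process {accelerator.process_index}/{accelerator.num_processes}")
#
#     notebook_launcher(training_function, args=(), num_processes=2)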
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
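# Added example invocation (the script filename is an assumption; pass any
# timm ViT/DeiT name accepted by timm.create_model):
#
#     python convert_vit_timm_to_pytorch.py \
#         --vit_name vit_base_patch16_224 \
#         --pytorch_dump_folder_path ./vit-base-patch16-224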
'''simple docstring'''
def solution(power: int = 1_000) -> int:
    """Returns the sum of the digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
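# Added note on is_pentagonal: P(n) = n(3n - 1) / 2, so inverting the quadratic
# gives n = (1 + sqrt(1 + 24x)) / 6; x is pentagonal exactly when that n is a
# positive integer. Quick self-check over the first pentagonal numbers:
assert all(is_pentagonal(n * (3 * n - 1) // 2) for n in range(1, 100))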
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def snake_case__ ( self : List[str] , __a : List[str] , __a : List[Any] , __a : Tuple ) -> List[Any]:
__UpperCAmelCase = DepthEstimationPipeline(model=__A , image_processor=__A )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def snake_case__ ( self : str , __a : Optional[Any] , __a : int ) -> List[Any]:
__UpperCAmelCase = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , __A )
import datasets
__UpperCAmelCase = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
__UpperCAmelCase = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] , __A , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def snake_case__ ( self : Optional[Any] ) -> Optional[Any]:
pass
@slow
@require_torch
def snake_case__ ( self : Optional[int] ) -> str:
__UpperCAmelCase = "Intel/dpt-large"
__UpperCAmelCase = pipeline('''depth-estimation''' , model=__A )
__UpperCAmelCase = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
__UpperCAmelCase = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 2_9.3_0_4 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.6_6_2 )
@require_torch
def snake_case__ ( self : List[Any] ) -> List[str]:
# This is highly irregular to have no small tests.
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    '''simple docstring'''
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    """simple docstring"""

    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']`."
            )
        return self.models

    @property
    def do_multi_processing(self):
        # `is_tpu` is provided by the framework-specific subclasses.
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
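# Editorial usage sketch (not part of the original file; assumes the restored
# class name above):
#
#     args = BenchmarkArguments(models=["bert-base-cased"], batch_sizes=[1],
#                               sequence_lengths=[8])
#     print(args.model_names)       # ['bert-base-cased']
#     print(args.to_json_string())  # the full configuration as JSON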
| 321 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
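# Editorial math note (not part of the original file): for control points
# P_0, ..., P_n the curve computed above is the Bernstein form
#     B(t) = sum_{i=0}^{n} C(n, i) * (1 - t)^(n - i) * t^i * P_i,  t in [0, 1],
# so basis_function returns the n + 1 Bernstein weights and
# bezier_curve_function takes their weighted sum over the control points.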
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 44 | 0 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, F'{split}_results.json')
    if os.path.exists(path):
        with open(path, 'r') as f:
            return json.load(f)
    raise ValueError(F'can\'t find {path}')
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    """simple docstring"""

    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
        with patch.object(sys, 'argv', testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
        with patch.object(sys, 'argv', testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result['eval_perplexity'], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n '.split()
        with patch.object(sys, 'argv', testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split='test')
            self.assertGreaterEqual(result['test_rouge1'], 10)
            self.assertGreaterEqual(result['test_rouge2'], 2)
            self.assertGreaterEqual(result['test_rougeL'], 7)
            self.assertGreaterEqual(result['test_rougeLsum'], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n '.split()
        with patch.object(sys, 'argv', testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result['eval_perplexity'], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
        with patch.object(sys, 'argv', testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n '.split()
        with patch.object(sys, 'argv', testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.75)
            self.assertGreaterEqual(result['eval_f1'], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n '.split()
        with patch.object(sys, 'argv', testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_f1'], 30)
            self.assertGreaterEqual(result['eval_exact'], 30)
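# Editorial note (not part of the original file): each test above patches
# sys.argv and calls the example script's main() directly. A hypothetical
# invocation from a transformers checkout might look like:
#
#     RUN_SLOW=1 python -m pytest examples/flax/test_flax_examples.py -k glue
#
# (the exact path depends on the checkout layout).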
| 282 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ['transformers', 'torch', 'note_seq']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
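# Editorial note (not part of the original file): DummyObject placeholders
# like the class above let `from diffusers import X` succeed even when an
# optional backend is missing; any actual use then raises an ImportError
# naming the missing packages. A minimal sketch of the mechanism
# (hypothetical, simplified):
#
#     class DummyObject(type):
#         def __getattr__(cls, key):
#             requires_backends(cls, cls._backends)  # raises ImportError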
| 44 | 0 |
'''simple docstring'''
import math
import sys
import cv2
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the Gaussian function element-wise on a value or an array.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Build a kernel of distances from the centre, then apply the Gaussian on it.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    imga = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            # intensity differences relative to the centre pixel of the window
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            imga[i, j] = np.sum(vals) / np.sum(weights)
    return imga
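# Editorial math note (not part of the original file): each output pixel is
# the normalized weighted average
#     I_out(i, j) = sum_k w_k * I_k / sum_k w_k,
# where w_k = G_spatial(distance to centre) * G_intensity(I_k - I_centre).
# The spatial kernel `gauss_ker` and the per-window intensity Gaussian
# `img_ig` above compute exactly these two factors.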
def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel size
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
A__ : str = parse_args(sys.argv)
A__ : List[Any] = cva.imread(filename, 0)
cva.imshow("""input image""", img)
A__ : Tuple = img / 255
A__ : Optional[Any] = out.astype("""float32""")
A__ : str = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
A__ : Optional[int] = out * 255
A__ : List[str] = np.uinta(out)
cva.imshow("""output image""", out)
cva.waitKey(0)
cva.destroyAllWindows()
| 13 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'add_prefix_space': True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", "^\"\"\"", "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    def test_padding_different_model_input_name(self):
        pass
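# Editorial note (not part of the original file): a hedged sketch of what the
# truncation test above exercises, runnable with network access:
#
#     from transformers import CodeGenTokenizer
#     tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#     ids = tok.encode("print('hi')\n\n\n# trailing comment")
#     print(tok.decode(ids, truncate_before_pattern=["^#", "\n\n\n"]))
#     # decoding stops before the comment / blank-line patterns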
| 44 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear'
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='scheduler')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-base', safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='scheduler')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='scheduler')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
| 209 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    # Editorial note: the obfuscated dump dropped the original class name;
    # this name follows the accelerate test-utils convention and may differ.
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}')
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}')
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders(accelerator, batch_size: int = 16):
    """simple docstring"""
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
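# Editorial usage sketch (not part of the original file; assumes an accelerate
# Accelerator instance and the MRPC csv fixtures at the paths above):
#
#     from accelerate import Accelerator
#     accelerator = Accelerator()
#     train_dl, eval_dl = mocked_dataloaders(accelerator, batch_size=16)
#     batch = next(iter(train_dl))  # dict with input_ids / attention_mask / labels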
| 44 | 0 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1E-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
| 656 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = False, False, False
@dataclass
class Audio:
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
    _type: str = field(default='Audio', init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don't have to "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. "
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. "
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self):
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
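# Editorial usage sketch (not part of the original file; requires `librosa`
# and `soundfile`, plus an audio file at the hypothetical path below):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]}).cast_column(
#         "audio", Audio(sampling_rate=16_000)
#     )
#     sample = ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}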
| 44 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    '''simple docstring'''

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(generator)}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
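# Editorial usage sketch (not part of the original file; the model id is an
# example checkpoint, any UNet2DModel works):
#
#     from diffusers import UNet2DModel, DDIMScheduler
#     unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#     pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
#     image = pipe(num_inference_steps=50).images[0]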
| 125 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = 'glpn'

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
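# Editorial usage sketch (not part of the original file):
#
#     config = GLPNConfig(depths=[2, 2, 2, 2])
#     print(config.hidden_sizes)  # [32, 64, 160, 256]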
| 44 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
lowerCamelCase_ : int = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def A__ ( lowerCamelCase ) -> Union[str, Any]:
UpperCamelCase_: Optional[int] = torch.load(_lowerCAmelCase , map_location="""cpu""" )
return sd
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=rename_keys_prefix ) -> Union[str, Any]:
UpperCamelCase_: Any = OrderedDict()
UpperCamelCase_: str = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
UpperCamelCase_: Any = key
for name_pair in rename_keys_prefix:
UpperCamelCase_: Dict = new_key.replace(name_pair[0] , name_pair[1] )
UpperCamelCase_: Any = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
UpperCamelCase_: List[str] = new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def A__ ( lowerCamelCase , lowerCamelCase ) -> str:
assert (
checkpoint_path.split("""/""" )[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
UpperCamelCase_: Optional[int] = "pretraining"
if "vcr" in checkpoint_path:
UpperCamelCase_: Union[str, Any] = {"visual_embedding_dim": 5_12}
elif "vqa_advanced" in checkpoint_path:
UpperCamelCase_: List[str] = {"visual_embedding_dim": 20_48}
elif "vqa" in checkpoint_path:
UpperCamelCase_: int = {"visual_embedding_dim": 20_48}
elif "nlvr" in checkpoint_path:
UpperCamelCase_: List[str] = {"visual_embedding_dim": 10_24}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
UpperCamelCase_: Any = {"visual_embedding_dim": 5_12}
UpperCamelCase_: List[Any] = "multichoice"
elif "vqa_advanced" in checkpoint_path:
UpperCamelCase_: Tuple = {"visual_embedding_dim": 20_48}
UpperCamelCase_: Dict = "vqa_advanced"
elif "vqa" in checkpoint_path:
UpperCamelCase_: Union[str, Any] = {"visual_embedding_dim": 20_48, "num_labels": 31_29}
UpperCamelCase_: Optional[int] = "vqa"
elif "nlvr" in checkpoint_path:
UpperCamelCase_: Tuple = {
"visual_embedding_dim": 10_24,
"num_labels": 2,
}
UpperCamelCase_: Optional[Any] = "nlvr"
UpperCamelCase_: str = VisualBertConfig(**_lowerCAmelCase )
# Load State Dict
UpperCamelCase_: str = load_state_dict(_lowerCAmelCase )
UpperCamelCase_: List[str] = get_new_dict(_lowerCAmelCase , _lowerCAmelCase )
if model_type == "pretraining":
UpperCamelCase_: List[Any] = VisualBertForPreTraining(_lowerCAmelCase )
elif model_type == "vqa":
UpperCamelCase_: Dict = VisualBertForQuestionAnswering(_lowerCAmelCase )
elif model_type == "nlvr":
UpperCamelCase_: Tuple = VisualBertForVisualReasoning(_lowerCAmelCase )
elif model_type == "multichoice":
UpperCamelCase_: str = VisualBertForMultipleChoice(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
# Save Checkpoints
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
lowerCamelCase_ : Tuple = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
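A hedged example invocation; the script filename is an assumption, substitute whatever this file is saved as:

# python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#     vqa_pre_trained.th ./visualbert-vqa-pre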
| 548 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
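A minimal usage sketch of the extractor above; the one-second 440 Hz sine input is purely illustrative:

import numpy as np

extractor = MCTCTFeatureExtractor()  # defaults: 80 mel bins at 16 kHz
waveform = np.sin(2 * np.pi * 440 * np.arange(16000) / 16000).astype(np.float32)
inputs = extractor(waveform, sampling_rate=16000, return_tensors="np")
print(inputs["input_features"][0].shape)  # (num_frames, 80)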
| 44 | 0 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inpainting(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
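These tests double as usage documentation; a hedged sketch of running them (the pytest flags and the RUN_NIGHTLY environment variable follow common diffusers conventions and are assumptions here):

# Requires a CUDA GPU, onnxruntime-gpu, and network access:
# RUN_NIGHTLY=1 python -m pytest -k "inpaint" tests/pipelines/stable_diffusion/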
| 570 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]


def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 44 | 0 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()
        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None

    def step(self, model_output, timestep, sample, return_dict=True):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
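A hedged usage sketch of the scheduler above; the denoising loop matches diffusers' KDPM2DiscreteScheduler, and the zero "noise prediction" stand-in below is an illustrative assumption in place of a real UNet call:

scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(20)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a UNet forward pass
    sample = scheduler.step(noise_pred, t, sample).prev_sample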
| 112 |
'''simple docstring'''
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
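A small worked example for intuition (pass prices chosen here for illustration):

# 1-day/7-day/30-day passes costing 2/7/15:
print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11
# optimal: a 1-day pass for day 1, a 7-day pass covering days 4-8,
# and a 1-day pass for day 20 -> 2 + 7 + 2 = 11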
| 44 | 0 |
'''simple docstring'''
from __future__ import annotations
def average(nums: list[float]) -> float:
    if not nums:
        raise ValueError('List is empty')
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
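A quick check of the helper above:

print(average([2, 4, 6, 8]))  # 5.0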
| 596 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.weight')
            in_proj_bias = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.bias')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[
                -dim:, :
            ]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f'Unexpected keys: {unexpected_keys}'

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f'Saving model and image processor to {pytorch_dump_folder_path}')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f'nielsr/{model_name}')
        image_processor.push_to_hub(f'nielsr/{model_name}')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
    '--model_name',
    default='maskformer-swin-tiny-ade',
    type=str,
    help='Name of the MaskFormer model you\'d like to convert',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
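A hedged example invocation; the script filename is an assumption:

# python convert_maskformer_original_pytorch_checkpoint_to_pytorch.py \
#     --model_name maskformer-swin-tiny-ade \
#     --checkpoint_path /path/to/model.pkl \
#     --pytorch_dump_folder_path ./maskformer-swin-tiny-ade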
| 44 | 0 |
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
input_string = 'aab'
pattern = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"""{input_string} matches the given pattern {pattern}""")
else:
print(F"""{input_string} does not match with the given pattern {pattern}""")
| 262 |
'''simple docstring'''
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    # a_i is interpreted as b * 10^k + c; ds_b is digitsum(b), c the low digits
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)

        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
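As a sanity check, the sequence being accelerated is a(1) = 1 with a(n+1) = a(n) + digit_sum(a(n)); a naive reference for small n (this helper is an illustrative addition, not part of the original script):

def naive_a(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a

# naive_a(6) == 23 (1, 2, 4, 8, 16, 23, ...); solution(6) should agree with it.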
| 44 | 0 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase__ = logging.getLogger(__name__)
def __lowercase ( ) -> Dict:
'''simple docstring'''
__lowercase = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=_lowerCAmelCase , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=_lowerCAmelCase , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=_lowerCAmelCase , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=_lowerCAmelCase , default=1_000 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=_lowerCAmelCase , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=_lowerCAmelCase , type=_lowerCAmelCase , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=_lowerCAmelCase , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=_lowerCAmelCase , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
__lowercase = parser.parse_args()
return args
def __lowercase ( _UpperCAmelCase ) -> Dict:
'''simple docstring'''
def fn(_UpperCAmelCase ):
return tokenizer(examples["text"] )
return fn
def __lowercase ( _UpperCAmelCase ) -> Any:
'''simple docstring'''
__lowercase = []
for i in range(len(tokenized_data["input_ids"] ) ):
__lowercase = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
__lowercase = tf.train.Features(feature=_lowerCAmelCase )
__lowercase = tf.train.Example(features=_lowerCAmelCase )
__lowercase = example.SerializeToString()
records.append(_lowerCAmelCase )
return records
def main(args) -> None:
    '''simple docstring'''
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(f'''Limiting the dataset to {args.limit} entries.''' )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1_000 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"] )
        filename = os.path.join(split_dir , f'''dataset-{shard_count}-{records_containing}.tfrecord''' )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print("Wrote file {} containing {} records".format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(f'''split-{args.split}-records-count.txt''' , "w" ) as f:
        print(f'''Total {args.split} records: {total_records}''' , file=f )
if __name__ == "__main__":
    args = parse_args()
main(args)
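# Editor's note: a pure-Python illustration of the chunking that `group_texts` performs inside
# `main` above, on a toy token list so the drop-the-remainder behaviour is easy to verify.
# The helper name and sample values are illustrative only.
def _chunk_demo(tokens, max_length):
    total_length = (len(tokens) // max_length) * max_length  # drop the small remainder
    return [tokens[i : i + max_length] for i in range(0, total_length, max_length)]

# 10 tokens with max_length=4 -> two full chunks; the trailing 2 tokens are dropped.
assert _chunk_demo(list(range(10)), 4) == [[0, 1, 2, 3], [4, 5, 6, 7]]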
| 321 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    freeze_encoder: bool = field(default=False , metadata={'help': 'Whether to freeze the encoder.'} )
    freeze_embeds: bool = field(default=False , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
    task: Optional[str] = field(
        default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
    max_source_length: Optional[int] = field(
        default=1024 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    max_target_length: Optional[int] = field(
        default=128 , metadata={
            'help': (
                'The maximum total sequence length for target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    val_max_target_length: Optional[int] = field(
        default=142 , metadata={
            'help': (
                'The maximum total sequence length for validation target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded. '
                'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
                'during ``evaluate`` and ``predict``.'
            )
        } , )
    test_max_target_length: Optional[int] = field(
        default=142 , metadata={
            'help': (
                'The maximum total sequence length for test target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    n_train: Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
    n_val: Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
    n_test: Optional[int] = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
    src_lang: Optional[str] = field(default=None , metadata={'help': 'Source language id for translation.'} )
    tgt_lang: Optional[str] = field(default=None , metadata={'help': 'Target language id for translation.'} )
    eval_beams: Optional[int] = field(default=None , metadata={'help': '# num_beams to use for evaluation.'} )
    ignore_pad_token_for_loss: bool = field(
        default=True , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def handle_metrics(split , metrics , output_dir ):
    """simple docstring"""
    logger.info(F'***** {split} metrics *****' )
    for key in sorted(metrics.keys() ):
        logger.info(F' {key} = {metrics[key]}' )
    save_json(metrics , os.path.join(output_dir , F'{split}_results.json' ) )
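# Editor's note: `save_json` is imported from the local `utils` module and not shown in this
# file. The sketch below is an assumption about what such a helper typically does (dump a
# metrics dict as JSON), not the actual implementation; the name is deliberately different
# so it cannot shadow the real import.
def _save_json_sketch(content, path, indent=4):
    import json

    with open(path, "w") as f:
        json.dump(content, f, indent=indent, sort_keys=True)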
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), F'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
    # use task specific params
    use_task_specific_params(model , data_args.task )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    if model_args.freeze_embeds:
        freeze_embeds(model )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    dataset_class = Seq2SeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=Seq2SeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***" )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("train" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate(metric_key_prefix="val" )
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics("val" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
    if training_args.do_predict:
        logger.info("*** Predict ***" )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix="test" )
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"] , 4 )
            handle_metrics("test" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , "test_generations.txt" ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , "all_results.json" ) )
    return all_metrics
def _mp_fn(index ):
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
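# Editor's note: `freeze_params`, `freeze_embeds` and `assert_all_frozen` come from the local
# `utils` module and are not shown here. A minimal sketch of the standard pattern they
# implement (an assumption, not the actual implementation):
def _freeze_params_sketch(module):
    for p in module.parameters():
        p.requires_grad = False


def _assert_all_frozen_sketch(module):
    assert all(not p.requires_grad for p in module.parameters()), "found unfrozen parameters"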
if __name__ == "__main__":
main()
| 44 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_xlm_roberta_base(self ):
        """simple docstring"""
        model = XLMRobertaModel.from_pretrained('xlm-roberta-base' )
        input_ids = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 7_68) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )["last_hidden_state"].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
    @slow
    def test_xlm_roberta_large(self ):
        """simple docstring"""
        model = XLMRobertaModel.from_pretrained('xlm-roberta-large' )
        input_ids = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 10_24) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )["last_hidden_state"].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
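# Editor's note: per the inline comments above, the expected slices come from the original
# fairseq checkpoints. A runnable sketch of that reference computation (requires `fairseq`
# to be installed; only run on demand) might look like:
def _fairseq_reference_last_dim(input_ids, variant="xlmr.base"):
    xlmr = torch.hub.load("pytorch/fairseq", variant)
    xlmr.eval()
    return xlmr.extract_features(input_ids[0])[:, :, -1]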
| 282 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
def __init__( self : List[Any],__A : str,__A : List[str]=1_3,__A : str=3_2,__A : Tuple=2,__A : Any=3,__A : Dict=1_6,__A : Dict=[3_2, 6_4, 1_2_8],__A : List[str]=[1, 2, 1],__A : str=[2, 2, 4],__A : Optional[int]=2,__A : Dict=2.0,__A : str=True,__A : Tuple=0.0,__A : int=0.0,__A : List[str]=0.1,__A : Any="gelu",__A : List[Any]=False,__A : Optional[Any]=True,__A : List[str]=0.02,__A : Tuple=1e-5,__A : Any=True,__A : Tuple=None,__A : Tuple=True,__A : Tuple=1_0,__A : List[Any]=8,__A : Optional[int]=["stage1", "stage2"],__A : int=[1, 2],):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Optional[int] = image_size
_lowerCamelCase : int = patch_size
_lowerCamelCase : Optional[Any] = num_channels
_lowerCamelCase : int = embed_dim
_lowerCamelCase : int = hidden_sizes
_lowerCamelCase : List[Any] = depths
_lowerCamelCase : Any = num_heads
_lowerCamelCase : List[str] = window_size
_lowerCamelCase : str = mlp_ratio
_lowerCamelCase : Any = qkv_bias
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : List[str] = drop_path_rate
_lowerCamelCase : str = hidden_act
_lowerCamelCase : Union[str, Any] = use_absolute_embeddings
_lowerCamelCase : List[Any] = patch_norm
_lowerCamelCase : Tuple = layer_norm_eps
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : Tuple = scope
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : int = type_sequence_label_size
_lowerCamelCase : Tuple = encoder_stride
_lowerCamelCase : Any = out_features
_lowerCamelCase : Any = out_indices
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Union[str, Any] ):
return FocalNetConfig(
            image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,embed_dim=self.embed_dim,hidden_sizes=self.hidden_sizes,depths=self.depths,num_heads=self.num_heads,window_size=self.window_size,mlp_ratio=self.mlp_ratio,qkv_bias=self.qkv_bias,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,drop_path_rate=self.drop_path_rate,hidden_act=self.hidden_act,use_absolute_embeddings=self.use_absolute_embeddings,patch_norm=self.patch_norm,layer_norm_eps=self.layer_norm_eps,initializer_range=self.initializer_range,encoder_stride=self.encoder_stride,out_features=self.out_features,out_indices=self.out_indices,)
def lowerCamelCase_ ( self : int,__A : Union[str, Any],__A : Tuple,__A : List[Any] ):
_lowerCamelCase : Optional[Any] = FocalNetModel(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[Any] = model(__A )
_lowerCamelCase : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCamelCase : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, expected_seq_len, expected_dim) )
def lowerCamelCase_ ( self : int,__A : Optional[int],__A : int,__A : Optional[int] ):
_lowerCamelCase : Any = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ),len(config.out_features ) )
self.parent.assertListEqual(model.channels,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCamelCase : List[str] = None
_lowerCamelCase : List[str] = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ),1 )
self.parent.assertListEqual(model.channels,[config.hidden_sizes[-1]] )
def lowerCamelCase_ ( self : Optional[int],__A : Optional[int],__A : Dict,__A : Dict ):
_lowerCamelCase : List[Any] = FocalNetForMaskedImageModeling(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
self.parent.assertEqual(
result.reconstruction.shape,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCamelCase : Dict = 1
_lowerCamelCase : Any = FocalNetForMaskedImageModeling(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Optional[int] = model(__A )
self.parent.assertEqual(result.reconstruction.shape,(self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase_ ( self : List[Any],__A : Union[str, Any],__A : List[Any],__A : Optional[Any] ):
_lowerCamelCase : Union[str, Any] = self.type_sequence_label_size
_lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[int] = model(__A,labels=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : str = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = model(__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : int = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
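# Editor's note: a standalone check of the sequence-length / hidden-dim arithmetic used by the
# model test above. The values mirror this tester's defaults (image_size=32, patch_size=2,
# embed_dim=16, depths=[1, 2, 1]); the helper name is illustrative.
def _expected_final_shape(image_size, patch_size, embed_dim, depths):
    expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))
    expected_dim = int(embed_dim * 2 ** (len(depths) - 1))
    return expected_seq_len, expected_dim

assert _expected_final_shape(32, 2, 16, [1, 2, 1]) == (16, 64)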
@require_torch
class FocalNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = FocalNetModelTester(self )
_lowerCamelCase : int = ConfigTester(self,config_class=__A,embed_dim=3_7,has_text_modality=__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : List[str] ):
return
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def lowerCamelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def lowerCamelCase_ ( self : List[str] ):
pass
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : str = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(),(nn.Module) )
_lowerCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A,nn.Linear ) )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : Union[str, Any] = model_class(__A )
_lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : int = [*signature.parameters.keys()]
_lowerCamelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1],__A )
def lowerCamelCase_ ( self : Tuple,__A : Any,__A : List[Any],__A : str,__A : Any ):
_lowerCamelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**self._prepare_for_class(__A,__A ) )
_lowerCamelCase : Optional[int] = outputs.hidden_states
_lowerCamelCase : int = getattr(
self.model_tester,"expected_num_hidden_layers",len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__A ),__A )
# FocalNet has a different seq_length
_lowerCamelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
_lowerCamelCase : Any = outputs.reshaped_hidden_states
self.assertEqual(len(__A ),__A )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = reshaped_hidden_states[0].shape
_lowerCamelCase : List[str] = (
reshaped_hidden_states[0].view(__A,__A,height * width ).permute(0,2,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Tuple = 3
_lowerCamelCase : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCamelCase : int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Optional[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
@slow
def lowerCamelCase_ ( self : Tuple ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Dict = FocalNetModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = _config_zero_init(__A )
for model_class in self.all_model_classes:
_lowerCamelCase : Any = model_class(config=__A )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),[0.0, 1.0],msg=f'Parameter {name} of model {model_class} seems not properly initialized',)
@require_vision
@require_torch
class FocalNetModelIntegrationTest( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Union[str, Any] ):
# TODO update organization
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(__A )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_lowerCamelCase : Dict = image_processor(images=__A,return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__A )
# verify the logits
_lowerCamelCase : List[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape,__A )
_lowerCamelCase : List[str] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__A,atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item(),2_8_1 )
@require_torch
class FocalNetBackboneTest( BackboneTesterMixin , unittest.TestCase ):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = FocalNetModelTester(self )
| 44 | 0 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 ):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    datasets = load_dataset('glue' , 'mrpc' )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='longest' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='pt' , )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
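# Editor's note: a quick illustration of why `pad_to_multiple_of` is set in `collate_fn`
# above: rounding the padded length up to a multiple of 8 (fp16/bf16) or 16 (fp8) keeps
# tensor shapes friendly to tensor cores. The helper is illustrative, not accelerate API.
def _round_up_to_multiple(length, multiple):
    return ((length + multiple - 1) // multiple) * multiple

assert _round_up_to_multiple(57, 8) == 64
assert _round_up_to_multiple(57, 16) == 64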
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config , args ):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS' , None ) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    metric = evaluate.load('glue' , 'mrpc' )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=1_00 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather((predictions, batch['labels']) )
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader ) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'epoch {epoch}:' , eval_metric )
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.' , )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
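# Editor's note: as the "New Code" comments above point out, the manual last-batch truncation
# can be replaced with `Accelerator.gather_for_metrics`, which drops the samples duplicated by
# the distributed sampler itself. A hedged sketch of the equivalent eval-step body:
def _eval_step_with_gather_for_metrics(accelerator, metric, outputs, batch):
    predictions, references = accelerator.gather_for_metrics(
        (outputs.logits.argmax(dim=-1), batch["labels"])
    )
    metric.add_batch(predictions=predictions, references=references)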
| 13 |
'''simple docstring'''
class Things:
    def __init__(self , name , value , weight ):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self ):
        return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'

    def get_value(self ):
        return self.value

    def get_name(self ):
        return self.name

    def get_weight(self ):
        return self.weight

    def value_weight(self ):
        return self.value / self.weight
def build_menu(name , value , weight ):
    """simple docstring"""
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy(item , max_cost , key_func ):
    """simple docstring"""
    items_copy = sorted(item , key=key_func , reverse=True )
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
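def _greedy_demo():
    # Editor's note: an illustrative usage of `greedy` above with made-up menu data.
    # With a budget of 15 only the salad (weight 10) fits when items are taken in
    # descending order of value.
    food = ["burger", "salad", "steak"]
    value = [80, 40, 100]
    weight = [40, 10, 30]
    menu = build_menu(food, value, weight)
    chosen, total_value = greedy(menu, 15, Things.get_value)
    print(chosen, total_value)  # -> [Things(salad, 40, 10)] 40.0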
def test_greedy():
    """simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 | 0 |
'''simple docstring'''
def hamming(n_element: int ) -> list:
    '''simple docstring'''
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError('a should be a positive number' )
        raise my_error
    hamming_list = [1]
    (i, j, k) = (0, 0, 0)
    index = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
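# Editor's note: quick sanity check added by the editor; the first ten Hamming numbers are a
# well-known sequence.
assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]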
if __name__ == "__main__":
UpperCamelCase_ = input('''Enter the last number (nth term) of the Hamming Number Series: ''')
print('''Formula of Hamming Number Series => 2^i * 3^j * 5^k''')
UpperCamelCase_ = hamming(int(n))
print('''-----------------------------------------------------''')
print(f"The list with nth numbers is: {hamming_numbers}")
print('''-----------------------------------------------------''')
| 209 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_conditional_detr'] = ['ConditionalDetrFeatureExtractor']
    _import_structure['image_processing_conditional_detr'] = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_conditional_detr'] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
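# Editor's note: a miniature, self-contained sketch of the idea behind `_LazyModule`, in the
# spirit of PEP 562. This is an illustration only, not transformers' actual implementation:
# symbols are imported from their submodule only on first attribute access.
def _lazy_getattr(name, import_structure, package):
    import importlib

    for module_name, symbols in import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module("." + module_name, package), name)
    raise AttributeError(f"module {package!r} has no attribute {name!r}")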
| 44 | 0 |
"""simple docstring"""
import argparse
import os
import re
PATH_TO_TRANSFORMERS = 'src/transformers'
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(R"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(R"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(R"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(R"\[([^\]]+)\]")
def get_indent(line: str ) -> str:
    """simple docstring"""
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code , indent_level="" , start_prompt=None , end_prompt=None ):
    """simple docstring"""
    index = 0
    lines = code.split("\n" )
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt ):
            index += 1
        blocks = ["\n".join(lines[:index] )]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(current_block ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
                current_block.append(lines[index] )
                blocks.append("\n".join(current_block ) )
                if index < len(lines ) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block ) )
                current_block = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block ) > 0:
        blocks.append("\n".join(current_block ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines ):
        blocks.append("\n".join(lines[index:] ) )
    return blocks
def ignore_underscore(key ):
    """simple docstring"""
    def _inner(x ):
        return key(x ).lower().replace("_" , "" )
    return _inner
def sort_objects(objects , key=None ):
    """simple docstring"""
    def noop(x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
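# Editor's note: a quick demonstration of the ordering implemented by `sort_objects` above
# (constants first, then classes, then functions, with underscores ignored when comparing).
assert sort_objects(["beta_fn", "MyClass", "CONSTANT", "alpha_fn", "AClass"]) == [
    "CONSTANT",
    "AClass",
    "MyClass",
    "alpha_fn",
    "beta_fn",
]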
def sort_objects_in_import(import_statement: str ) -> str:
    """simple docstring"""
    # This inner function sorts imports between [ ].
    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return F'''[{imports}]'''
        keys = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(keys )] ) + "]"

    lines = import_statement.split("\n" )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys_to_sort , key=lambda x: x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + ", ".join([F'''"{k}"''' for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace , import_statement )
        return import_statement
def sort_imports(file: str , check_only: bool = True ):
    """simple docstring"""
    with open(file , encoding="utf-8" ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n" )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x: x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i] )
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reorderded_blocks.append(block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(F'''Overwriting {file}.''' )
            with open(file , "w" , encoding="utf-8" ) as f:
                f.write("\n".join(main_blocks ) )
def sort_imports_in_all_inits(check_only: bool = True ):
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , "__init__.py" ) , check_only=check_only )
            if result:
                failures = [os.path.join(root , "__init__.py" )]
    if len(failures ) > 0:
        raise ValueError(F'''Would overwrite {len(failures )} files, run `make style`.''' )
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
__A : str = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 656 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path ):
    """simple docstring"""
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        "\\n header1,header2\n 1,2\n 10,20\n " )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def malformed_csv_file(tmp_path ):
    """simple docstring"""
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        "\\n header1,header2\n 1,2\n 10,20,\n " )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_image(tmp_path , image_file ):
    """simple docstring"""
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        F'\\n image\n {image_file}\n ' )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_label(tmp_path ):
    """simple docstring"""
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        "\\n label\n good\n bad\n good\n " )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_int_list(tmp_path ):
    """simple docstring"""
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        "\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n " )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file , malformed_csv_file , caplog ):
    """simple docstring"""
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError , match="Error tokenizing data" ):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file ) in record.message
        for record in caplog.records )
@require_pil
def test_csv_cast_image(csv_file_with_image ):
    """simple docstring"""
    with open(csv_file_with_image , encoding="utf-8" ) as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8" , features=Features({"image": Image()} ) )
    generator = csv._generate_tables([[csv_file_with_image]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field("image" ).type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label ):
    """simple docstring"""
    with open(csv_file_with_label , encoding="utf-8" ) as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) )
    generator = csv._generate_tables([[csv_file_with_label]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"] ).str2int(label ) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list ):
    """simple docstring"""
    csv = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda x: [int(i ) for i in x.split()]} )
    generator = csv._generate_tables([[csv_file_with_int_list]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa.types.is_list(pa_table.schema.field("int_list" ).type )
    generated_content = pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
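# A quick, self-contained check of the converter used in the last test above. pytest
# wires every fixture in by parameter name, so the tests need no manual setup (the
# `image_file` fixture is assumed to be defined elsewhere in the suite):
#
#     parse_int_list = lambda x: [int(i) for i in x.split()]
#     assert parse_int_list("1 2 3") == [1, 2, 3]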
| 44 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_pegasus_x'] = [
        'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PegasusXForConditionalGeneration',
        'PegasusXModel',
        'PegasusXPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
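# Illustrative sketch of what the lazy module buys: importing the package stays cheap,
# and the torch-backed modeling file is only imported when one of its attributes is
# first accessed.
#
#     from transformers.models.pegasus_x import PegasusXConfig  # config module only
#     from transformers.models.pegasus_x import PegasusXModel   # triggers the modeling import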
| 125 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),reason="XFormers attention is only available with CUDA and `xformers` installed",)
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 44 | 0 |
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
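# Worked example for the function above: for number = -5, bin(-5)[3:] == "101", so
# binary_number_length == 3; abs(-5) - (1 << 3) == -3 and bin(-3)[3:] == "11", which is
# padded to "1011", i.e. the 4-bit two's complement of -5.
#
#     assert twos_complement(-5) == "0b1011"
#     assert twos_complement(-1) == "0b11"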
| 548 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
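# Minimal usage sketch for the streaming dataset above (checkpoint and dataset names
# are illustrative assumptions, not fixed by this script):
#
#     tok = AutoTokenizer.from_pretrained("gpt2")
#     ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train", streaming=True)
#     for sample in ConstantLengthDataset(tok, ds, seq_length=128):
#         print(sample.shape)  # torch.Size([128])
#         break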
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
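# Note on the metric: perplexity is exp(mean token-level cross-entropy loss), so a mean
# eval loss of 2.0 corresponds to a perplexity of about 7.39:
#
#     import math
#     math.exp(2.0)  # 7.389...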
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 44 | 0 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ),
] )
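# Minimal wiring sketch for the pieces above (the jsonl file name and batch size are
# illustrative; the MM-IMDB data files are assumed to exist):
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(),
#                            get_mmimdb_labels(), max_seq_length=509)
#     loader = torch.utils.data.DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
#     text, mask, img, img_start, img_end, tgt = next(iter(loader))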
| 570 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/led-base-16384': 1_6384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace",
                 bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>",
                 pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
| 44 | 0 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of a pre-softmax logit tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
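# Sanity check for entropy(): uniform logits give the maximum value log(n).
#
#     logits = torch.zeros(1, 4)  # 4 equally likely classes
#     entropy(logits)             # tensor([1.3863]) == log(4)
#
# DeeBERT compares this value against a per-layer threshold to decide whether an
# intermediate ("highway") classifier is confident enough to exit early.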
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()
    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10_000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
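# How early exit plays out at inference (a sketch): each highway classifier produces
# logits whose entropy is compared to the per-layer threshold set via
# set_early_exit_entropy; a sufficiently confident layer raises HighwayException, which
# DeeBertForSequenceClassification catches and returns that layer's logits.
#
#     model.bert.encoder.set_early_exit_entropy(0.5)  # illustrative threshold
#     outputs = model(**inputs)                       # may exit before the last layer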
| 112 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
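# The slicing above splits timm's fused qkv projection of shape (3 * hidden, hidden)
# into three (hidden, hidden) matrices, in query/key/value order:
#
#     hidden = 4
#     qkv = torch.arange(3 * hidden * hidden).reshape(3 * hidden, hidden)
#     q, k, v = qkv[:hidden, :], qkv[hidden : 2 * hidden, :], qkv[-hidden:, :]
#     assert torch.equal(torch.cat([q, k, v]), qkv)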
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 44 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 596 |
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
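# Quick check of is_pentagonal on a known pentagonal number: for n = 22,
# 1 + 24 * 22 = 529, sqrt(529) = 23, and (1 + 23) / 6 = 4 exactly, so 22 is the 4th
# pentagonal number.
#
#     assert is_pentagonal(22)
#     assert not is_pentagonal(23)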
| 44 | 0 |
'''simple docstring'''
def apply_table(inp, table):
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    return data[1:] + data[0]


def xor(a, b):
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
key = input("Enter 10 bit key: ")
message = input("Enter 8 bit message: ")

p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
p4_table = [2, 4, 3, 1]
IP = [2, 6, 3, 1, 4, 8, 5, 7]
IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
expansion = [4, 1, 2, 3, 2, 3, 4, 1]
s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

# key generation
temp = apply_table(key, p10_table)
left = temp[:5]
right = temp[5:]
left = left_shift(left)
right = left_shift(right)
key1 = apply_table(left + right, p8_table)
left = left_shift(left)
right = left_shift(right)
left = left_shift(left)
right = left_shift(right)
key2 = apply_table(left + right, p8_table)

# encryption
temp = apply_table(message, IP)
temp = function(expansion, s0, s1, key1, temp)
temp = temp[4:] + temp[:4]
temp = function(expansion, s0, s1, key2, temp)
CT = apply_table(temp, IP_inv)
print("Cipher text is:", CT)

# decryption
temp = apply_table(CT, IP)
temp = function(expansion, s0, s1, key2, temp)
temp = temp[4:] + temp[:4]
temp = function(expansion, s0, s1, key1, temp)
PT = apply_table(temp, IP_inv)
print("Plain text after decrypting is:", PT)
| 262 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_mobilebert': [
        'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'MobileBertConfig',
        'MobileBertOnnxConfig',
    ],
    'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mobilebert_fast'] = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mobilebert'] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mobilebert'] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44 | 0 |
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES

    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
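# Hypothetical usage sketch (the agent framework normally instantiates tools, but the
# tool is also directly callable; the checkpoint download happens on first use):
#
#     translator = TranslationTool()
#     translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English")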
| 321 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
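# Worked example: for the degree-1 curve through (1, 2) and (3, 5), the basis at
# t = 0.5 is [0.5, 0.5], so the curve point is the midpoint of the segment:
#
#     curve = BezierCurve([(1, 2), (3, 5)])
#     curve.bezier_curve_function(0.5)  # (2.0, 3.5)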
| 44 | 0 |
from __future__ import annotations
def shear_stress(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
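# Worked example: exactly one of the three quantities must be 0 (the unknown); with
# tangential_force = 25 and area = 5 the function solves for stress:
#
#     assert shear_stress(0, 25, 5) == ("stress", 5.0)
#     assert shear_stress(25, 100, 0) == ("area", 4.0)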
| 282 |
from ..utils import DummyObject, requires_backends


class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
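
# Behavior sketch (exposition only): because `DummyObject` routes attribute
# access through `requires_backends`, instantiating this class or calling its
# classmethods raises an ImportError naming the missing optional backends
# (`transformers`, `torch`, `note_seq`) instead of failing at import time.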
import argparse
import os

import torch

from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint


def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoint
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
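
# Example invocation (paths and script name below are placeholders, not real
# checkpoints):
#   python convert_flava_original_pytorch_to_hf.py \
#       --checkpoint_path flava_full.ckpt \
#       --codebook_path flava_codebook.ckpt \
#       --pytorch_dump_folder_path ./flava-converted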
import json
import os
import re
import unittest

from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    def test_padding_different_model_input_name(self):
        pass
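
# Worked example for the toy vocabulary above (exposition only): with the
# merges "\u0120 l", "\u0120l o", "\u0120lo w" and "e r", the input
# " lower newer" tokenizes to ["\u0120low", "er", "\u0120", "n", "e", "w", "er"],
# i.e. ids [14, 15, 10, 9, 3, 2, 15]; any token outside the vocabulary falls
# back to "<unk>" (id 19), which is exactly what test_full_tokenizer asserts.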
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter name -> HF parameter name
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]


def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]


PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}


def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
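
# Example invocations (script name and paths are placeholders):
#   # fine-tuned CTC checkpoint
#   python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path wav2vec_small_960h.pt --dict_path dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-base-960h
#   # pretrained-only checkpoint
#   python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path wav2vec_small.pt --not_finetuned \
#       --pytorch_dump_folder_path ./wav2vec2-base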
import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
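
# Sanity sketch (exposition only): RegressionModel(a=2, b=3) computes
# y = 2 * x + 3, matching the synthetic data produced by RegressionDataset.
def _regression_demo() -> None:
    model = RegressionModel(a=2, b=3)
    out = model(torch.tensor(1.0))  # prints dtypes on the first batch
    assert torch.allclose(out, torch.tensor(5.0))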
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : str ):
with open(__A , encoding="utf-8" ) as input_file:
A__ : List[str] =re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
A__ : List[str] =input_file.read()
A__ : Union[str, Any] =regexp.search(__A )
return match
def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : str ):
with open(__A , encoding="utf-8" ) as input_file:
A__ : List[Any] =re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
A__ : int =input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
A__ : Optional[Any] =regexp.finditer(__A )
A__ : Optional[Any] =[match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def _UpperCAmelCase ( self : Tuple ):
A__ : str =Path("./datasets" )
A__ : Optional[Any] =list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(__A ) ):
raise AssertionError(F'''open(...) must use utf-8 encoding in {dataset}''' )
def _UpperCAmelCase ( self : List[Any] ):
A__ : Any =Path("./datasets" )
A__ : Union[str, Any] =list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(__A ) ):
raise AssertionError(F'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
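
# Worked example (exposition only) for the two checks above:
#   " open(path)"                    -> flagged by _no_encoding_on_file_open
#   " open(path, encoding='utf-8')"  -> allowed (the lookahead sees "encoding")
#   " open(path, 'rb')"              -> allowed (binary mode)
#   "print(value)"                   -> flagged by _no_print_statements
#   "# print(value)"                 -> ignored (comment context never fills group 1)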
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Audio:
    """Audio feature: stores audio as {"bytes", "path"} structs and decodes them to arrays."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-bytes, we don't have to do "read file, make bytes" (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(
        self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
    ) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
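
# Usage sketch (exposition only; the file path is a placeholder):
#   feature = Audio(sampling_rate=16_000)
#   encoded = feature.encode_example("path/to/clip.wav")
#   # -> {"bytes": None, "path": "path/to/clip.wav"}
#   decoded = feature.decode_example(encoded)
#   # -> {"path": ..., "array": <np.ndarray>, "sampling_rate": 16000},
#   # resampled/downmixed to mono as configured.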
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
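
# These tests follow the shared image-processor test pattern: each input type
# (PIL, NumPy, PyTorch) must come out center-cropped to `crop_size`. With the
# defaults above, a single image becomes a (1, 3, 18, 18) tensor and a batch of
# 7 images becomes (7, 3, 18, 18).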
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
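
# Instantiation sketch (exposition only): the defaults describe a four-stage
# hierarchical encoder.
#   config = GLPNConfig()
#   config.num_encoder_blocks   -> 4
#   config.hidden_sizes         -> [32, 64, 160, 256]
#   config.decoder_hidden_size  -> 64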
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock

import torch

from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config


# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]


class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)


@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
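
# Example of running this suite directly (exposition only; the test file path
# may differ in your checkout):
#   python -m pytest tests/test_examples.py -k "checkpointing" -s
# Each FeatureExamplesTests test shells out through
# `accelerate launch --config_file <temporary default_config.yml>`.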
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = ['input_features', 'attention_mask']
def __init__( self : Any,__A : List[Any]=8_0,__A : Dict=1_6_0_0_0,__A : Tuple=0.0,__A : Dict=1_0,__A : int=2_5,__A : Union[str, Any]="hamming_window",__A : List[str]=32768.0,__A : Union[str, Any]=0.97,__A : str=1.0,__A : Union[str, Any]=True,__A : Tuple=True,__A : Optional[Any]=False,**__A : Optional[Any],):
super().__init__(feature_size=__A,sampling_rate=__A,padding_value=__A,**__A )
_lowerCamelCase : Dict = feature_size
_lowerCamelCase : List[str] = sampling_rate
_lowerCamelCase : Any = padding_value
_lowerCamelCase : Dict = hop_length
_lowerCamelCase : Tuple = win_length
_lowerCamelCase : str = frame_signal_scale
_lowerCamelCase : List[str] = preemphasis_coeff
_lowerCamelCase : List[str] = mel_floor
_lowerCamelCase : str = normalize_means
_lowerCamelCase : Any = normalize_vars
_lowerCamelCase : List[str] = win_function
_lowerCamelCase : Tuple = return_attention_mask
_lowerCamelCase : List[Any] = win_length * sampling_rate // 1_0_0_0
_lowerCamelCase : List[Any] = hop_length * sampling_rate // 1_0_0_0
_lowerCamelCase : Any = optimal_fft_length(self.sample_size )
_lowerCamelCase : Dict = (self.n_fft // 2) + 1
def lowerCamelCase_ ( self : Any,__A : np.array ):
if self.win_function == "hamming_window":
_lowerCamelCase : Any = window_function(window_length=self.sample_size,name=self.win_function,periodic=__A )
else:
_lowerCamelCase : Optional[int] = window_function(window_length=self.sample_size,name=self.win_function )
_lowerCamelCase : int = mel_filter_bank(
num_frequency_bins=self.n_freqs,num_mel_filters=self.feature_size,min_frequency=0.0,max_frequency=self.sampling_rate / 2.0,sampling_rate=self.sampling_rate,)
_lowerCamelCase : List[str] = spectrogram(
one_waveform * self.frame_signal_scale,window=__A,frame_length=self.sample_size,hop_length=self.sample_stride,fft_length=self.n_fft,center=__A,preemphasis=self.preemphasis_coeff,mel_filters=__A,mel_floor=self.mel_floor,log_mel="log",)
return msfc_features.T
def lowerCamelCase_ ( self : Optional[int],__A : List[str],__A : Dict,__A : int ):
# make sure we normalize float32 arrays
if self.normalize_means:
_lowerCamelCase : Optional[Any] = x[:input_length].mean(axis=0 )
_lowerCamelCase : Optional[int] = np.subtract(__A,__A )
if self.normalize_vars:
_lowerCamelCase : int = x[:input_length].std(axis=0 )
_lowerCamelCase : Any = np.divide(__A,__A )
if input_length < x.shape[0]:
_lowerCamelCase : Tuple = padding_value
# make sure array is in float32
_lowerCamelCase : Optional[int] = x.astype(np.floataa )
return x
def lowerCamelCase_ ( self : Any,__A : List[np.ndarray],__A : Optional[np.ndarray] = None ):
_lowerCamelCase : Optional[int] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__A,__A,self.padding_value ) for x, n in zip(__A,__A )]
def __call__( self : Optional[Any],__A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],__A : Union[bool, str, PaddingStrategy] = False,__A : Optional[int] = None,__A : bool = False,__A : Optional[int] = None,__A : Optional[bool] = None,__A : Optional[Union[str, TensorType]] = None,__A : Optional[int] = None,**__A : Optional[Any],):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_lowerCamelCase : List[str] = isinstance(__A,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
_lowerCamelCase : List[str] = is_batched_numpy or (
isinstance(__A,(list, tuple) ) and (isinstance(raw_speech[0],(np.ndarray, tuple, list) ))
)
if is_batched:
            _lowerCamelCase : List[Any] = [np.asarray(__A,dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(__A,np.ndarray ):
            _lowerCamelCase : Dict = np.asarray(__A,dtype=np.float32 )
        elif isinstance(__A,np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            _lowerCamelCase : Tuple = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
_lowerCamelCase : Tuple = [raw_speech]
# extract fbank features
_lowerCamelCase : str = [self._extract_mfsc_features(__A ) for one_waveform in raw_speech]
# convert into correct format for padding
_lowerCamelCase : Union[str, Any] = BatchFeature({"input_features": features} )
_lowerCamelCase : List[Any] = self.pad(
__A,padding=__A,max_length=__A,truncation=__A,pad_to_multiple_of=__A,return_attention_mask=__A,**__A,)
# make sure list is in array format
_lowerCamelCase : Optional[Any] = padded_inputs.get("input_features" )
if isinstance(input_features[0],__A ):
            _lowerCamelCase : int = [np.asarray(__A,dtype=np.float32 ) for feature in input_features]
_lowerCamelCase : Dict = padded_inputs.get("attention_mask" )
if attention_mask is not None:
            _lowerCamelCase : Dict = [np.asarray(__A,dtype=np.int32 ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
_lowerCamelCase : Dict = (
                np.array(__A,dtype=np.int32 )
if self._get_padding_strategies(__A,max_length=__A ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
_lowerCamelCase : Tuple = self.normalize(
padded_inputs["input_features"],attention_mask=__A )
if return_tensors is not None:
_lowerCamelCase : Dict = padded_inputs.convert_to_tensors(__A )
return padded_inputs
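# --- Added illustration (not part of the original file) ---------------------
# A standalone sketch of the log-mel pipeline the extractor above wraps, using
# the same audio_utils helpers with the class defaults (80 mel bins, a 25 ms
# Hamming window and a 10 ms hop at 16 kHz). The synthetic waveform and the
# helper name are assumptions; pre-emphasis and signal scaling are omitted for
# brevity.
def _demo_mfsc_pipeline():
    sr = 16_000
    waveform = np.sin(2 * np.pi * 440.0 * np.arange(sr) / sr).astype(np.float32)
    win = 25 * sr // 1_000  # 400 samples
    hop = 10 * sr // 1_000  # 160 samples
    n_fft = optimal_fft_length(win)
    window = window_function(window_length=win, name="hamming_window", periodic=False)
    filters = mel_filter_bank(
        num_frequency_bins=n_fft // 2 + 1,
        num_mel_filters=80,
        min_frequency=0.0,
        max_frequency=sr / 2.0,
        sampling_rate=sr,
    )
    features = spectrogram(
        waveform,
        window=window,
        frame_length=win,
        hop_length=hop,
        fft_length=n_fft,
        mel_filters=filters,
        log_mel="log",
    ).T
    return features  # (num_frames, 80), matching _extract_mfsc_features above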
| 44 | 0 |
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 1_00,
) -> float:
    """simple docstring"""
    xa = x_start
    fxa = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next)
        length += math.hypot(xa_next - xa, fxa_next - fxa)
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return length
if __name__ == "__main__":
    def f(x: float) -> float:
        """simple docstring"""
        return math.sin(10 * x)
    print('f(x) = sin(10 * x)')
    print('The length of the curve from x = -10 to x = 10 is:')
    i = 10
    while i <= 10_0000:
        print(F'With {i} steps: {line_length(f, -10, 10, i)}')
        i *= 10
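    # Added sanity check (illustrative): the chord sum is exact for straight
    # lines, so y = 2 * x from 0 to 3 must give 3 * sqrt(5) ~ 6.7082 even with
    # few steps.
    print(F'Straight-line check: {line_length(lambda x: 2 * x, 0, 3, 10)} vs {3 * math.sqrt(5)}')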
| 570 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
UpperCAmelCase_ : int = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = torch.load(_lowerCAmelCase , map_location="cpu" )
return sd
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple=rename_keys_prefix ):
"""simple docstring"""
_lowerCamelCase : Any = OrderedDict()
_lowerCamelCase : str = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_lowerCamelCase : Any = key
for name_pair in rename_keys_prefix:
_lowerCamelCase : Dict = new_key.replace(name_pair[0] , name_pair[1] )
_lowerCamelCase : Any = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
_lowerCamelCase : List[str] = new_d["cls.predictions.bias"]
return new_d
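# --- Added illustration (not part of the original script) -------------------
# Miniature of the prefix renaming above: each (old, new) pair is applied by
# substring replacement, so the order of the pairs matters. The demo keys are
# hypothetical.
def _demo_rename():
    demo = {"bert.bert.encoder.layer.0.w": 0, "bert.cls.predictions.bias": 1}
    renamed = {}
    for key, value in demo.items():
        new_key = key
        for old, new in [("bert.bert", "visual_bert"), ("bert.cls", "cls")]:
            new_key = new_key.replace(old, new)
        renamed[new_key] = value
    assert set(renamed) == {"visual_bert.encoder.layer.0.w", "cls.predictions.bias"}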
@torch.no_grad()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Dict ):
"""simple docstring"""
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
# Get Config
if "pre" in checkpoint_path:
_lowerCamelCase : Optional[int] = "pretraining"
if "vcr" in checkpoint_path:
_lowerCamelCase : Union[str, Any] = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
_lowerCamelCase : List[str] = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
_lowerCamelCase : int = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
_lowerCamelCase : List[str] = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.' )
else:
if "vcr" in checkpoint_path:
_lowerCamelCase : Any = {"visual_embedding_dim": 512}
_lowerCamelCase : List[Any] = "multichoice"
elif "vqa_advanced" in checkpoint_path:
_lowerCamelCase : Tuple = {"visual_embedding_dim": 2048}
_lowerCamelCase : Dict = "vqa_advanced"
elif "vqa" in checkpoint_path:
_lowerCamelCase : Union[str, Any] = {"visual_embedding_dim": 2048, "num_labels": 3129}
_lowerCamelCase : Optional[int] = "vqa"
elif "nlvr" in checkpoint_path:
_lowerCamelCase : Tuple = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
_lowerCamelCase : Optional[Any] = "nlvr"
_lowerCamelCase : str = VisualBertConfig(**_lowerCAmelCase )
# Load State Dict
_lowerCamelCase : str = load_state_dict(_lowerCAmelCase )
_lowerCamelCase : List[str] = get_new_dict(_lowerCAmelCase , _lowerCAmelCase )
if model_type == "pretraining":
_lowerCamelCase : List[Any] = VisualBertForPreTraining(_lowerCAmelCase )
elif model_type == "vqa":
_lowerCamelCase : Dict = VisualBertForQuestionAnswering(_lowerCAmelCase )
elif model_type == "nlvr":
_lowerCamelCase : Tuple = VisualBertForVisualReasoning(_lowerCAmelCase )
elif model_type == "multichoice":
_lowerCamelCase : str = VisualBertForMultipleChoice(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
# Save Checkpoints
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 44 | 0 |
from typing import Dict, Optional
import numpy as np
import datasets
SCREAMING_SNAKE_CASE__ : List[Any] = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
SCREAMING_SNAKE_CASE__ : Any = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
SCREAMING_SNAKE_CASE__ : Tuple = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False , ):
if label_map is not None:
for old_id, new_id in label_map.items():
a__ : Any = new_id
# turn into Numpy arrays
a__ : str = np.array(_lowerCAmelCase )
a__ : List[str] = np.array(_lowerCAmelCase )
if reduce_labels:
a__ : Optional[int] = 255
a__ : Tuple = label - 1
a__ : List[Any] = 255
a__ : int = label != ignore_index
a__ : str = np.not_equal(_lowerCAmelCase , _lowerCAmelCase )
a__ : List[str] = pred_label[mask]
a__ : str = np.array(_lowerCAmelCase )[mask]
a__ : Union[str, Any] = pred_label[pred_label == label]
a__ : int = np.histogram(_lowerCAmelCase , bins=_lowerCAmelCase , range=(0, num_labels - 1) )[0]
a__ : Dict = np.histogram(_lowerCAmelCase , bins=_lowerCAmelCase , range=(0, num_labels - 1) )[0]
a__ : Tuple = np.histogram(_lowerCAmelCase , bins=_lowerCAmelCase , range=(0, num_labels - 1) )[0]
a__ : List[str] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False , ):
    a__ : List[str] = np.zeros((num_labels,) , dtype=np.float64 )
    a__ : Union[str, Any] = np.zeros((num_labels,) , dtype=np.float64 )
    a__ : Optional[int] = np.zeros((num_labels,) , dtype=np.float64 )
    a__ : List[str] = np.zeros((num_labels,) , dtype=np.float64 )
for result, gt_seg_map in zip(_lowerCAmelCase , _lowerCAmelCase ):
a__ : Optional[int] = intersect_and_union(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = False , ):
a__ : int = total_intersect_and_union(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# compute metrics
a__ : Tuple = {}
a__ : Union[str, Any] = total_area_intersect.sum() / total_area_label.sum()
a__ : Optional[Any] = total_area_intersect / total_area_union
a__ : str = total_area_intersect / total_area_label
a__ : Union[str, Any] = np.nanmean(_lowerCAmelCase )
a__ : Dict = np.nanmean(_lowerCAmelCase )
a__ : Any = all_acc
a__ : Tuple = iou
a__ : str = acc
if nan_to_num is not None:
a__ : str = {metric: np.nan_to_num(_lowerCAmelCase , nan=_lowerCAmelCase ) for metric, metric_value in metrics.items()}
return metrics
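# --- Added illustration (not part of the original metric) -------------------
# A tiny worked example of the IoU definition used above, computed directly
# with numpy on a hypothetical 2x2 case with two labels:
#   prediction [[0, 1], [1, 1]] vs ground truth [[0, 1], [0, 1]]
#   label 0: intersection 1, union 2 -> IoU 0.5
#   label 1: intersection 2, union 3 -> IoU 2/3, so mean IoU ~ 0.583
def _demo_mean_iou():
    pred = np.array([[0, 1], [1, 1]])
    true = np.array([[0, 1], [0, 1]])
    ious = []
    for label in (0, 1):
        inter = np.logical_and(pred == label, true == label).sum()
        union = np.logical_or(pred == label, true == label).sum()
        ious.append(inter / union)
    return float(np.mean(ious))  # ~0.583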
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
def _snake_case ( self ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
"references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
} ) , reference_urls=[
"https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
] , )
def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case = None , snake_case = None , snake_case = False , ) -> Tuple:
"""simple docstring"""
a__ : Optional[int] = mean_iou(
results=__A , gt_seg_maps=__A , num_labels=__A , ignore_index=__A , nan_to_num=__A , label_map=__A , reduce_labels=__A , )
return iou_result
| 112 |
'''simple docstring'''
import functools
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : list[int] ):
"""simple docstring"""
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or not all(isinstance(_lowerCAmelCase , _lowerCAmelCase ) for day in days ):
raise ValueError("The parameter days should be a list of integers" )
if len(_lowerCAmelCase ) != 3 or not all(isinstance(_lowerCAmelCase , _lowerCAmelCase ) for cost in costs ):
raise ValueError("The parameter costs should be a list of three integers" )
if len(_lowerCAmelCase ) == 0:
return 0
if min(_lowerCAmelCase ) <= 0:
raise ValueError("All days elements should be greater than 0" )
if max(_lowerCAmelCase ) >= 366:
raise ValueError("All days elements should be less than 366" )
_lowerCamelCase : Union[str, Any] = set(_lowerCAmelCase )
@functools.cache
def dynamic_programming(_lowerCAmelCase : int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
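# --- Added illustration (not part of the original file) ---------------------
# A compact standalone restatement of the dynamic programme above, with
# readable (hypothetical) names, plus the classic worked instance: for
# days = [1, 4, 6, 7, 8, 20] and costs = [2, 7, 15] the optimum is 11
# (a 7-day pass covering days 1-7, then 1-day passes for days 8 and 20).
def _demo_min_ticket_cost(days: list[int], costs: list[int]) -> int:
    day_set = set(days)
    @functools.cache
    def best(day: int) -> int:
        if day > 365:
            return 0
        if day not in day_set:
            return best(day + 1)
        return min(
            costs[0] + best(day + 1),
            costs[1] + best(day + 7),
            costs[2] + best(day + 30),
        )
    return best(1)
assert _demo_min_ticket_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11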
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = True) -> Any:
print(f'Converting {name}...')
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
UpperCamelCase__ : Any = timm.create_model('levit_128s' , pretrained=_lowerCAmelCase)
else:
UpperCamelCase__ : Tuple = timm.create_model('levit_128' , pretrained=_lowerCAmelCase)
if hidden_sizes == 192:
UpperCamelCase__ : str = timm.create_model('levit_192' , pretrained=_lowerCAmelCase)
if hidden_sizes == 256:
UpperCamelCase__ : str = timm.create_model('levit_256' , pretrained=_lowerCAmelCase)
if hidden_sizes == 384:
UpperCamelCase__ : Tuple = timm.create_model('levit_384' , pretrained=_lowerCAmelCase)
from_model.eval()
UpperCamelCase__ : Any = LevitForImageClassificationWithTeacher(_lowerCAmelCase).eval()
UpperCamelCase__ : Union[str, Any] = OrderedDict()
UpperCamelCase__ : Tuple = from_model.state_dict()
UpperCamelCase__ : Union[str, Any] = list(from_model.state_dict().keys())
UpperCamelCase__ : List[Any] = list(our_model.state_dict().keys())
print(len(_lowerCAmelCase) , len(_lowerCAmelCase))
for i in range(len(_lowerCAmelCase)):
UpperCamelCase__ : str = weights[og_keys[i]]
our_model.load_state_dict(_lowerCAmelCase)
UpperCamelCase__ : str = torch.randn((2, 3, 224, 224))
UpperCamelCase__ : Any = from_model(_lowerCAmelCase)
UpperCamelCase__ : List[Any] = our_model(_lowerCAmelCase).logits
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase), "The model logits don't match the original one."
UpperCamelCase__ : Dict = name
print(_lowerCAmelCase)
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name)
UpperCamelCase__ : List[str] = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name)
print(f'Pushed {checkpoint_name}')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = True) -> int:
UpperCamelCase__ : Union[str, Any] = "imagenet-1k-id2label.json"
UpperCamelCase__ : Union[str, Any] = 1_000
UpperCamelCase__ : str = (1, num_labels)
UpperCamelCase__ : List[str] = "huggingface/label-files"
UpperCamelCase__ : Optional[int] = num_labels
UpperCamelCase__ : List[Any] = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type='dataset') , 'r'))
UpperCamelCase__ : Union[str, Any] = {int(_lowerCAmelCase): v for k, v in idalabel.items()}
UpperCamelCase__ : str = idalabel
UpperCamelCase__ : List[str] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ : Any = partial(_lowerCAmelCase , num_labels=_lowerCAmelCase , idalabel=_lowerCAmelCase , labelaid=_lowerCAmelCase)
UpperCamelCase__ : Any = {
"levit-128S": 128,
"levit-128": 128,
"levit-192": 192,
"levit-256": 256,
"levit-384": 384,
}
UpperCamelCase__ : str = {
"levit-128S": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"levit-128": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"levit-192": ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"levit-256": ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"levit-384": ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , _lowerCAmelCase , names_to_config[model_name] , _lowerCAmelCase , _lowerCAmelCase)
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
return config, expected_shape
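# --- Added illustration (not part of the original script) -------------------
# Miniature of the copy-by-position trick in convert_weight_and_push: weights
# are transferred by index, which relies on both state dicts enumerating their
# parameters in the same order. The module names below are hypothetical.
def _demo_copy_by_position():
    src = OrderedDict([('blocks.0.w', torch.ones(2, 2)), ('blocks.0.b', torch.zeros(2))])
    dst_keys = ['layers.0.weight', 'layers.0.bias']
    remapped = OrderedDict(zip(dst_keys, src.values()))
    assert list(remapped) == dst_keys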
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 596 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
_lowerCamelCase : Dict = MaskFormerConfig(backbone_config=_lowerCAmelCase )
_lowerCamelCase : Tuple = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
_lowerCamelCase : List[Any] = 847
_lowerCamelCase : str = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
_lowerCamelCase : Optional[int] = 150
_lowerCamelCase : Union[str, Any] = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
_lowerCamelCase : Union[str, Any] = 171
_lowerCamelCase : str = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
_lowerCamelCase : Optional[int] = 133
_lowerCamelCase : Any = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
_lowerCamelCase : str = 19
_lowerCamelCase : Tuple = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
_lowerCamelCase : List[Any] = 65
_lowerCamelCase : Optional[int] = "mapillary-vistas-id2label.json"
_lowerCamelCase : Any = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : Optional[int] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
return config
def A_ ( _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Any = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Tuple = dct.pop(_lowerCAmelCase )
_lowerCamelCase : str = val
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_lowerCamelCase : int = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_lowerCamelCase : Union[str, Any] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
_lowerCamelCase : List[str] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Optional[int] = in_proj_weight[:dim, :]
_lowerCamelCase : Optional[int] = in_proj_bias[: dim]
_lowerCamelCase : List[str] = in_proj_weight[
dim : dim * 2, :
]
_lowerCamelCase : List[Any] = in_proj_bias[
dim : dim * 2
]
_lowerCamelCase : List[Any] = in_proj_weight[
-dim :, :
]
_lowerCamelCase : Union[str, Any] = in_proj_bias[-dim :]
# fmt: on
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : int = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase : Tuple = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
_lowerCamelCase : Optional[int] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Optional[Any] = in_proj_weight[: hidden_size, :]
_lowerCamelCase : Optional[int] = in_proj_bias[:config.hidden_size]
_lowerCamelCase : str = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase : Dict = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase : Any = in_proj_weight[-hidden_size :, :]
_lowerCamelCase : Any = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase : Optional[int] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
_lowerCamelCase : List[Any] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Tuple = in_proj_weight[: hidden_size, :]
_lowerCamelCase : str = in_proj_bias[:config.hidden_size]
_lowerCamelCase : str = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase : Optional[int] = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase : int = in_proj_weight[-hidden_size :, :]
_lowerCamelCase : Optional[Any] = in_proj_bias[-hidden_size :]
# fmt: on
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : bool = False ):
"""simple docstring"""
_lowerCamelCase : Tuple = get_maskformer_config(_lowerCAmelCase )
# load original state_dict
with open(_lowerCAmelCase , "rb" ) as f:
_lowerCamelCase : List[Any] = pickle.load(_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_lowerCamelCase : List[Any] = create_rename_keys(_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_swin_q_k_v(_lowerCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
# update to torch tensors
for key, value in state_dict.items():
_lowerCamelCase : Dict = torch.from_numpy(_lowerCAmelCase )
# load 🤗 model
_lowerCamelCase : int = MaskFormerForInstanceSegmentation(_lowerCAmelCase )
model.eval()
for name, param in model.named_parameters():
print(_lowerCAmelCase , param.shape )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(_lowerCAmelCase ) == 0, F'Unexpected keys: {unexpected_keys}'
# verify results
_lowerCamelCase : Any = prepare_img()
if "vistas" in model_name:
_lowerCamelCase : Any = 65
elif "cityscapes" in model_name:
_lowerCamelCase : Optional[Any] = 65535
else:
_lowerCamelCase : str = 255
_lowerCamelCase : List[str] = True if "ade" in model_name else False
_lowerCamelCase : Union[str, Any] = MaskFormerImageProcessor(ignore_index=_lowerCAmelCase , reduce_labels=_lowerCAmelCase )
_lowerCamelCase : int = image_processor(_lowerCAmelCase , return_tensors="pt" )
_lowerCamelCase : Tuple = model(**_lowerCAmelCase )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_lowerCamelCase : Tuple = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(F'nielsr/{model_name}' )
image_processor.push_to_hub(F'nielsr/{model_name}' )
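# --- Added illustration (not part of the original script) -------------------
# Miniature of the fused-projection split performed by read_in_swin_q_k_v and
# read_in_decoder_q_k_v above: the checkpoint stores one (3 * hidden, hidden)
# in_proj matrix that is sliced into equal query/key/value thirds. The size is
# hypothetical.
def _demo_qkv_split(hidden: int = 4):
    in_proj_weight = torch.randn(3 * hidden, hidden)
    q = in_proj_weight[:hidden, :]
    k = in_proj_weight[hidden : hidden * 2, :]
    v = in_proj_weight[-hidden:, :]
    assert torch.equal(torch.cat([q, k, v]), in_proj_weight)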
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help=('Name of the MaskFormer model you\'d like to convert',),
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase_ : int = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 44 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : List[str] = {
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
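# Note (added): at import time this file only builds `_import_structure`; the
# torch-backed submodules are loaded lazily on first attribute access through
# `_LazyModule`, while the TYPE_CHECKING branch keeps static type checkers
# accurate without importing anything heavy at runtime.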
| 262 |
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = range(2, 20 + 1)
UpperCAmelCase_ : str = [10**k for k in range(ks[-1] + 1)]
UpperCAmelCase_ : dict[int, dict[int, list[list[int]]]] = {}
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = sum(a_i[j] for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ) )
_lowerCamelCase : List[str] = sum(a_i[j] * base[j] for j in range(min(len(_lowerCAmelCase ) , _lowerCAmelCase ) ) )
_lowerCamelCase , _lowerCamelCase : int = 0, 0
_lowerCamelCase : Dict = n - i
_lowerCamelCase : int = memo.get(_lowerCAmelCase )
if sub_memo is not None:
_lowerCamelCase : List[str] = sub_memo.get(_lowerCAmelCase )
if jumps is not None and len(_lowerCAmelCase ) > 0:
# find and make the largest jump without going over
_lowerCamelCase : List[Any] = -1
for _k in range(len(_lowerCAmelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_lowerCamelCase : Any = _k
break
if max_jump >= 0:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = jumps[max_jump]
# since the difference between jumps is cached, add c
_lowerCamelCase : str = diff + c
for j in range(min(_lowerCAmelCase , len(_lowerCAmelCase ) ) ):
_lowerCamelCase , _lowerCamelCase : List[Any] = divmod(_lowerCAmelCase , 10 )
if new_c > 0:
add(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_lowerCamelCase : int = []
else:
_lowerCamelCase : Tuple = {c: []}
_lowerCamelCase : Any = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_lowerCamelCase , _lowerCamelCase : Optional[int] = next_term(_lowerCAmelCase , k - 1 , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_lowerCamelCase , _lowerCamelCase : List[str] = compute(_lowerCAmelCase , _lowerCAmelCase , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
_lowerCamelCase : List[str] = sub_memo[c]
# keep jumps sorted by # of terms skipped
_lowerCamelCase : int = 0
while j < len(_lowerCAmelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(_lowerCAmelCase , (diff, dn, k) )
return (diff, dn)
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
if i >= n:
return 0, i
if k > len(_lowerCAmelCase ):
a_i.extend([0 for _ in range(k - len(_lowerCAmelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_lowerCamelCase : List[str] = i
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = 0, 0, 0
for j in range(len(_lowerCAmelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_lowerCamelCase : int = ds_c + ds_b
diff += addend
_lowerCamelCase : List[str] = 0
for j in range(_lowerCAmelCase ):
_lowerCamelCase : List[Any] = a_i[j] + addend
_lowerCamelCase , _lowerCamelCase : Any = divmod(_lowerCAmelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return diff, i - start_i
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ):
_lowerCamelCase : Tuple = digits[j] + addend
if s >= 10:
_lowerCamelCase , _lowerCamelCase : Optional[int] = divmod(_lowerCAmelCase , 10 )
_lowerCamelCase : Any = addend // 10 + quotient
else:
_lowerCamelCase : Tuple = s
_lowerCamelCase : List[Any] = addend // 10
if addend == 0:
break
while addend > 0:
_lowerCamelCase , _lowerCamelCase : str = divmod(_lowerCAmelCase , 10 )
digits.append(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : int = 10**15 ):
"""simple docstring"""
_lowerCamelCase : Tuple = [1]
_lowerCamelCase : List[Any] = 1
_lowerCamelCase : List[str] = 0
while True:
_lowerCamelCase , _lowerCamelCase : Dict = next_term(_lowerCAmelCase , 20 , i + dn , _lowerCAmelCase )
dn += terms_jumped
if dn == n - i:
break
_lowerCamelCase : Optional[Any] = 0
for j in range(len(_lowerCAmelCase ) ):
a_n += digits[j] * 10**j
return a_n
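# --- Added illustration (not part of the original file) ---------------------
# Brute-force cross-check for small n (hypothetical helper): the sequence is
# a(1) = 1 and a(k + 1) = a(k) + digitsum(a(k)), so it starts
# 1, 2, 4, 8, 16, 23, ...
def _demo_brute_force(n: int) -> int:
    value = 1
    for _ in range(n - 1):
        value += sum(int(digit) for digit in str(value))
    return value
assert [_demo_brute_force(k) for k in range(1, 7)] == [1, 2, 4, 8, 16, 23]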
if __name__ == "__main__":
print(f'''{solution() = }''')
| 44 | 0 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __lowercase ( _UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
if is_torch_version("<" , "2.0.0" ) or not hasattr(_lowerCAmelCase , "_dynamo" ):
return False
return isinstance(_lowerCAmelCase , torch._dynamo.eval_frame.OptimizedModule )
def __lowercase ( _UpperCAmelCase , _UpperCAmelCase = True ) -> List[Any]:
'''simple docstring'''
__lowercase = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
__lowercase = is_compiled_module(_lowerCAmelCase )
if is_compiled:
__lowercase = model
__lowercase = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__lowercase = model.module
if not keep_fpaa_wrapper:
__lowercase = getattr(_lowerCAmelCase , "forward" )
__lowercase = model.__dict__.pop("_original_forward" , _lowerCAmelCase )
if original_forward is not None:
while hasattr(_lowerCAmelCase , "__wrapped__" ):
__lowercase = forward.__wrapped__
if forward == original_forward:
break
__lowercase = forward
if getattr(_lowerCAmelCase , "_converted_to_transformer_engine" , _lowerCAmelCase ):
convert_model(_lowerCAmelCase , to_transformer_engine=_lowerCAmelCase )
if is_compiled:
__lowercase = model
__lowercase = compiled_model
return model
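# --- Added illustration (not part of the original module) -------------------
# Miniature of the unwrapping loop above: peel DataParallel / DDP containers
# until the bare module is reached. The Linear layer is a hypothetical stand-in.
def _demo_unwrap():
    wrapped = torch.nn.DataParallel(torch.nn.Linear(2, 2))
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    while isinstance(wrapped, options):
        wrapped = wrapped.module
    assert isinstance(wrapped, torch.nn.Linear)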
def __lowercase ( ) -> Any:
'''simple docstring'''
PartialState().wait_for_everyone()
def __lowercase ( _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
'''simple docstring'''
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_lowerCAmelCase , _lowerCAmelCase )
elif PartialState().local_process_index == 0:
torch.save(_lowerCAmelCase , _lowerCAmelCase )
@contextmanager
def __lowercase ( **_UpperCAmelCase ) -> str:
'''simple docstring'''
for key, value in kwargs.items():
__lowercase = str(_lowerCAmelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __lowercase ( _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
if not hasattr(_lowerCAmelCase , "__qualname__" ) and not hasattr(_lowerCAmelCase , "__name__" ):
__lowercase = getattr(_lowerCAmelCase , "__class__" , _lowerCAmelCase )
if hasattr(_lowerCAmelCase , "__qualname__" ):
return obj.__qualname__
if hasattr(_lowerCAmelCase , "__name__" ):
return obj.__name__
return str(_lowerCAmelCase )
def __lowercase ( _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
'''simple docstring'''
for key, value in source.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__lowercase = destination.setdefault(_lowerCAmelCase , {} )
merge_dicts(_lowerCAmelCase , _lowerCAmelCase )
else:
__lowercase = value
return destination
def __lowercase ( _UpperCAmelCase = None ) -> str:
'''simple docstring'''
if port is None:
__lowercase = 29_500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
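# --- Added illustration (not part of the original module) -------------------
# Standalone miniature of the environment-patching context manager defined
# above (`patch_environment` in accelerate): variables are set for the
# duration of the `with` block and removed afterwards.
def _demo_patch_env():
    @contextmanager
    def patch(**kwargs):
        for key, value in kwargs.items():
            os.environ[key.upper()] = str(value)
        try:
            yield
        finally:
            for key in kwargs:
                os.environ.pop(key.upper(), None)
    with patch(my_flag='1'):
        assert os.environ['MY_FLAG'] == '1'
    assert 'MY_FLAG' not in os.environ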
| 321 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
UpperCAmelCase_ : Any = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    lowerCAmelCase_ = field(default=A , metadata={'help': 'Whether to freeze the encoder.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
lowerCAmelCase_ = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
lowerCAmelCase_ = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
lowerCAmelCase_ = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Source language id for translation.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Target language id for translation.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': '# num_beams to use for evaluation.'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any ):
"""simple docstring"""
logger.info(F'***** {split} metrics *****' )
for key in sorted(metrics.keys() ):
logger.info(F' {key} = {metrics[key]}' )
save_json(_lowerCAmelCase , os.path.join(_lowerCAmelCase , F'{split}_results.json' ) )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = parser.parse_args_into_dataclasses()
check_output_dir(_lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCamelCase : Tuple = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
assert hasattr(_lowerCAmelCase , _lowerCAmelCase ), F'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
setattr(_lowerCAmelCase , _lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCamelCase : int = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_lowerCAmelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_lowerCamelCase : List[Any] = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase : Any = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_lowerCamelCase : int = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_lowerCAmelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_lowerCamelCase : int = SeqaSeqDataset
# Get datasets
_lowerCamelCase : Tuple = (
dataset_class(
_lowerCAmelCase , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
_lowerCamelCase : List[Any] = (
dataset_class(
_lowerCAmelCase , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_lowerCamelCase : Optional[int] = (
dataset_class(
_lowerCAmelCase , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_lowerCamelCase : int = (
build_compute_metrics_fn(data_args.task , _lowerCAmelCase ) if training_args.predict_with_generate else None
)
    _lowerCamelCase : List[Any] = Seq2SeqTrainer(
        model=_lowerCAmelCase , args=_lowerCAmelCase , data_args=_lowerCAmelCase , train_dataset=_lowerCAmelCase , eval_dataset=_lowerCAmelCase , data_collator=Seq2SeqDataCollator(
        _lowerCAmelCase , _lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_lowerCAmelCase , tokenizer=_lowerCAmelCase , )
_lowerCamelCase : Optional[Any] = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
_lowerCamelCase : Optional[Any] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_lowerCamelCase : int = train_result.metrics
_lowerCamelCase : Optional[int] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_lowerCamelCase : Optional[Any] = trainer.evaluate(metric_key_prefix="val" )
_lowerCamelCase : Dict = data_args.n_val
_lowerCamelCase : List[Any] = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
if training_args.do_predict:
logger.info("*** Predict ***" )
_lowerCamelCase : Any = trainer.predict(test_dataset=_lowerCAmelCase , metric_key_prefix="test" )
_lowerCamelCase : Dict = test_output.metrics
_lowerCamelCase : Optional[int] = data_args.n_test
if trainer.is_world_process_zero():
_lowerCamelCase : int = round(metrics["test_loss"] , 4 )
handle_metrics("test" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
if training_args.predict_with_generate:
_lowerCamelCase : List[str] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
_lowerCamelCase : Any = lmap(str.strip , _lowerCAmelCase )
write_txt_file(_lowerCAmelCase , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(_lowerCAmelCase , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 44 | 0 |
__all__ = [
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 282 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester :
  def __init__( self : List[Any],parent : str,batch_size : List[str]=1_3,image_size : str=3_2,patch_size : Tuple=2,num_channels : Any=3,embed_dim : Dict=1_6,hidden_sizes : Dict=[3_2, 6_4, 1_2_8],depths : List[str]=[1, 2, 1],num_heads : str=[2, 2, 4],window_size : Optional[int]=2,mlp_ratio : Dict=2.0,qkv_bias : str=True,hidden_dropout_prob : Tuple=0.0,attention_probs_dropout_prob : int=0.0,drop_path_rate : List[str]=0.1,hidden_act : Any="gelu",use_absolute_embeddings : List[Any]=False,patch_norm : Optional[Any]=True,initializer_range : List[str]=0.02,layer_norm_eps : Tuple=1e-5,is_training : Any=True,scope : Tuple=None,use_labels : Tuple=True,type_sequence_label_size : Tuple=1_0,encoder_stride : List[Any]=8,out_features : Optional[int]=["stage1", "stage2"],out_indices : int=[1, 2],):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Optional[int] = image_size
_lowerCamelCase : int = patch_size
_lowerCamelCase : Optional[Any] = num_channels
_lowerCamelCase : int = embed_dim
_lowerCamelCase : int = hidden_sizes
_lowerCamelCase : List[Any] = depths
_lowerCamelCase : Any = num_heads
_lowerCamelCase : List[str] = window_size
_lowerCamelCase : str = mlp_ratio
_lowerCamelCase : Any = qkv_bias
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : List[str] = drop_path_rate
_lowerCamelCase : str = hidden_act
_lowerCamelCase : Union[str, Any] = use_absolute_embeddings
_lowerCamelCase : List[Any] = patch_norm
_lowerCamelCase : Tuple = layer_norm_eps
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : Tuple = scope
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : int = type_sequence_label_size
_lowerCamelCase : Tuple = encoder_stride
_lowerCamelCase : Any = out_features
_lowerCamelCase : Any = out_indices
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Union[str, Any] ):
return FocalNetConfig(
        image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,embed_dim=self.embed_dim,hidden_sizes=self.hidden_sizes,depths=self.depths,num_heads=self.num_heads,window_size=self.window_size,mlp_ratio=self.mlp_ratio,qkv_bias=self.qkv_bias,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,drop_path_rate=self.drop_path_rate,hidden_act=self.hidden_act,use_absolute_embeddings=self.use_absolute_embeddings,patch_norm=self.patch_norm,layer_norm_eps=self.layer_norm_eps,initializer_range=self.initializer_range,encoder_stride=self.encoder_stride,out_features=self.out_features,out_indices=self.out_indices,)
  def lowerCamelCase_ ( self : int,config : Union[str, Any],pixel_values : Tuple,labels : List[Any] ):
_lowerCamelCase : Optional[Any] = FocalNetModel(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[Any] = model(__A )
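    # Each downsampling stage quarters the token count and doubles the embedding dim, hence the 4** and 2** factors.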
_lowerCamelCase : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCamelCase : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, expected_seq_len, expected_dim) )
  def lowerCamelCase_ ( self : int,config : Optional[int],pixel_values : int,labels : Optional[int] ):
_lowerCamelCase : Any = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ),len(config.out_features ) )
self.parent.assertListEqual(model.channels,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCamelCase : List[str] = None
_lowerCamelCase : List[str] = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ),1 )
self.parent.assertListEqual(model.channels,[config.hidden_sizes[-1]] )
  def lowerCamelCase_ ( self : Optional[int],config : Optional[int],pixel_values : Dict,labels : Dict ):
_lowerCamelCase : List[Any] = FocalNetForMaskedImageModeling(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
self.parent.assertEqual(
result.reconstruction.shape,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCamelCase : Dict = 1
_lowerCamelCase : Any = FocalNetForMaskedImageModeling(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Optional[int] = model(__A )
self.parent.assertEqual(result.reconstruction.shape,(self.batch_size, 1, self.image_size, self.image_size) )
  def lowerCamelCase_ ( self : List[Any],config : Union[str, Any],pixel_values : List[Any],labels : Optional[Any] ):
_lowerCamelCase : Union[str, Any] = self.type_sequence_label_size
_lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[int] = model(__A,labels=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : str = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = model(__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : int = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
  all_model_classes = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = FocalNetModelTester(self )
_lowerCamelCase : int = ConfigTester(self,config_class=__A,embed_dim=3_7,has_text_modality=__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : List[str] ):
return
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def lowerCamelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def lowerCamelCase_ ( self : List[str] ):
pass
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
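    # The backbone (last entry of all_model_classes) exposes no input/output embeddings, so it is skipped here.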
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : str = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(),(nn.Module) )
_lowerCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A,nn.Linear ) )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : Union[str, Any] = model_class(__A )
_lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : int = [*signature.parameters.keys()]
_lowerCamelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1],__A )
  def lowerCamelCase_ ( self : Tuple,inputs_dict : Any,config : List[Any],model_class : str,image_size : Any ):
_lowerCamelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**self._prepare_for_class(__A,__A ) )
_lowerCamelCase : Optional[int] = outputs.hidden_states
_lowerCamelCase : int = getattr(
self.model_tester,"expected_num_hidden_layers",len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__A ),__A )
# FocalNet has a different seq_length
_lowerCamelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
_lowerCamelCase : Any = outputs.reshaped_hidden_states
self.assertEqual(len(__A ),__A )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = reshaped_hidden_states[0].shape
_lowerCamelCase : List[str] = (
reshaped_hidden_states[0].view(__A,__A,height * width ).permute(0,2,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Tuple = 3
_lowerCamelCase : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCamelCase : int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Optional[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
@slow
def lowerCamelCase_ ( self : Tuple ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Dict = FocalNetModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = _config_zero_init(__A )
for model_class in self.all_model_classes:
_lowerCamelCase : Any = model_class(config=__A )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),[0.0, 1.0],msg=f'Parameter {name} of model {model_class} seems not properly initialized',)
@require_vision
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Union[str, Any] ):
# TODO update organization
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(__A )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_lowerCamelCase : Dict = image_processor(images=__A,return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__A )
# verify the logits
_lowerCamelCase : List[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape,__A )
_lowerCamelCase : List[str] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__A,atol=1e-4 ) )
    self.assertEqual(outputs.logits.argmax(dim=-1 ).item(),2_8_1 )
@require_torch
class UpperCAmelCase__ ( BackboneTesterMixin , unittest.TestCase ):
lowerCAmelCase_ = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase_ = FocalNetConfig
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = FocalNetModelTester(self )
| 44 | 0 |
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env( key : Optional[int] , default : List[str]=False ) -> Any:
try:
__lowerCamelCase : Tuple = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__lowerCamelCase : str = default
else:
# KEY is set, convert it to True or False.
try:
__lowerCamelCase : Optional[int] = strtobool(_lowerCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F'If set, {key} must be yes or no.' )
return _value
A__ : Any = parse_flag_from_env("""RUN_SLOW""", default=False)
def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] ) -> Union[str, Any]:
return unittest.skip('Test was skipped' )(_lowerCAmelCase )
def UpperCAmelCase__ ( UpperCAmelCase_ : str ) -> Dict:
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(_lowerCAmelCase )
def UpperCAmelCase__ ( UpperCAmelCase_ : Union[str, Any] ) -> int:
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(_lowerCAmelCase )
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[Any] ) -> List[Any]:
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(_lowerCAmelCase )
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> Dict:
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(_lowerCAmelCase )
def UpperCAmelCase__ ( UpperCAmelCase_ : Tuple ) -> str:
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(_lowerCAmelCase )
def UpperCAmelCase__ ( UpperCAmelCase_ : Tuple ) -> List[str]:
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(_lowerCAmelCase )
def UpperCAmelCase__ ( UpperCAmelCase_ : Tuple ) -> Dict:
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(_lowerCAmelCase )
def UpperCAmelCase__ ( UpperCAmelCase_ : Tuple ) -> str:
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(_lowerCAmelCase )
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[Any] ) -> Optional[Any]:
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(_lowerCAmelCase )
def UpperCAmelCase__ ( UpperCAmelCase_ : Tuple ) -> List[str]:
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(_lowerCAmelCase )
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[Any] ) -> Optional[int]:
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(_lowerCAmelCase )
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> Dict:
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(_lowerCAmelCase )
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[Any] ) -> List[str]:
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(_lowerCAmelCase )
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> Any:
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(_lowerCAmelCase )
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> List[str]:
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(_lowerCAmelCase )
def UpperCAmelCase__ ( test_case : List[Any]=None , version : Optional[int]=None ) -> List[Any]:
if test_case is None:
return partial(_lowerCAmelCase , version=_lowerCAmelCase )
return unittest.skipUnless(is_torch_version('>=' , _lowerCAmelCase ) , F'test requires torch version >= {version}' )(_lowerCAmelCase )
def UpperCAmelCase__ ( UpperCAmelCase_ : List[str] ) -> str:
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(_lowerCAmelCase )
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[Any] ) -> Any:
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(_lowerCAmelCase )
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> str:
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(_lowerCAmelCase )
A__ : Optional[Any] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def UpperCAmelCase__ ( UpperCAmelCase_ : Dict ) -> List[str]:
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(_lowerCAmelCase )
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Dict = True
@classmethod
def lowercase_ ( cls ) -> List[str]:
__lowerCamelCase : List[str] = tempfile.mkdtemp()
@classmethod
def lowercase_ ( cls ) -> List[Any]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowercase_ ( self ) -> int:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('**/*' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(__A )
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Optional[int]:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
__lowerCamelCase : Tuple = mocks if isinstance(__A , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def UpperCAmelCase__ ( UpperCAmelCase_ : Any ) -> int:
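    # Gather the tensor from every process and check that each copy matches the first one.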
__lowerCamelCase : Tuple = AcceleratorState()
__lowerCamelCase : str = tensor[None].clone().to(state.device )
__lowerCamelCase : List[Any] = gather(_lowerCAmelCase ).cpu()
__lowerCamelCase : Optional[Any] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , _lowerCAmelCase ):
return False
return True
class _RunOutput :
"""simple docstring"""
    def __init__( self , returncode , stdout , stderr ) -> List[Any]:
__lowerCamelCase : Tuple = returncode
__lowerCamelCase : List[str] = stdout
__lowerCamelCase : Any = stderr
async def _read_stream( stream : Tuple , callback : int ) -> Any:
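    # Pump lines from the async subprocess stream into the callback until EOF.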
while True:
__lowerCamelCase : Optional[Any] = await stream.readline()
if line:
callback(_lowerCAmelCase )
else:
break
async def _stream_subprocess( cmd : Optional[Any] , env : Union[str, Any]=None , stdin : Dict=None , timeout : List[str]=None , quiet : List[Any]=False , echo : Optional[int]=False ) -> int:
if echo:
print('\nRunning: ' , ' '.join(_lowerCAmelCase ) )
__lowerCamelCase : List[str] = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_lowerCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__lowerCamelCase : List[Any] = []
__lowerCamelCase : List[str] = []
    def tee(line : int , sink : Union[str, Any] , pipe : int , label : int="" ):
__lowerCamelCase : Optional[Any] = line.decode('utf-8' ).rstrip()
sink.append(_lowerCAmelCase )
if not quiet:
print(_lowerCAmelCase , _lowerCAmelCase , file=_lowerCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda UpperCAmelCase_ : tee(_lowerCAmelCase , _lowerCAmelCase , sys.stdout , label='stdout:' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda UpperCAmelCase_ : tee(_lowerCAmelCase , _lowerCAmelCase , sys.stderr , label='stderr:' ) ) ),
] , timeout=_lowerCAmelCase , )
return _RunOutput(await p.wait() , _lowerCAmelCase , _lowerCAmelCase )
def UpperCAmelCase__ ( cmd : Tuple , env : int=None , stdin : int=None , timeout : List[Any]=1_80 , quiet : Tuple=False , echo : Optional[Any]=True ) -> int:
__lowerCamelCase : Dict = asyncio.get_event_loop()
__lowerCamelCase : List[Any] = loop.run_until_complete(
_stream_subprocess(_lowerCAmelCase , env=_lowerCAmelCase , stdin=_lowerCAmelCase , timeout=_lowerCAmelCase , quiet=_lowerCAmelCase , echo=_lowerCAmelCase ) )
__lowerCamelCase : List[str] = " ".join(_lowerCAmelCase )
if result.returncode > 0:
__lowerCamelCase : int = "\n".join(result.stderr )
raise RuntimeError(
F'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
F'The combined stderr from workers follows:\n{stderr}' )
return result
class SubprocessCallException(Exception ):
    """simple docstring"""
    pass
def UpperCAmelCase__ ( cmd : List[str] , return_stdout : Tuple=False ) -> Optional[Any]:
try:
__lowerCamelCase : Optional[Any] = subprocess.check_output(_lowerCAmelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(_lowerCAmelCase , 'decode' ):
__lowerCamelCase : List[str] = output.decode('utf-8' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F'Command `{" ".join(_lowerCAmelCase )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 13 |
'''simple docstring'''
class Things :
  def __init__( self : Any,name : Any,value : Any,weight : Any ):
_lowerCamelCase : List[Any] = name
_lowerCamelCase : Union[str, Any] = value
_lowerCamelCase : str = weight
def __repr__( self : Any ):
return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def lowerCamelCase_ ( self : Optional[int] ):
return self.value
def lowerCamelCase_ ( self : Any ):
return self.name
def lowerCamelCase_ ( self : List[Any] ):
return self.weight
def lowerCamelCase_ ( self : str ):
return self.value / self.weight
def A_ ( name : Any , value : Any , weight : Any ):
"""simple docstring"""
_lowerCamelCase : str = []
for i in range(len(_lowerCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def A_ ( items : Any , max_cost : List[str] , key_func : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = sorted(_lowerCAmelCase , key=_lowerCAmelCase , reverse=_lowerCAmelCase )
_lowerCamelCase : Optional[int] = []
_lowerCamelCase , _lowerCamelCase : Optional[int] = 0.0, 0.0
for i in range(len(_lowerCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def A_ ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class _SCREAMING_SNAKE_CASE( PretrainedConfig ):
A_ : Tuple = 'codegen'
A_ : int = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__( self : Dict , vocab_size : Optional[int]=5_04_00 , n_positions : Optional[int]=20_48 , n_ctx : Optional[Any]=20_48 , n_embd : Optional[int]=40_96 , n_layer : List[Any]=28 , n_head : Optional[Any]=16 , rotary_dim : int=64 , n_inner : str=None , activation_function : Dict="gelu_new" , resid_pdrop : Tuple=0.0 , embd_pdrop : List[Any]=0.0 , attn_pdrop : Dict=0.0 , layer_norm_epsilon : List[Any]=1e-5 , initializer_range : List[str]=0.02 , use_cache : Tuple=True , bos_token_id : List[str]=5_02_56 , eos_token_id : Optional[int]=5_02_56 , tie_word_embeddings : int=False , **UpperCamelCase_ : List[Any] , ) -> Dict:
SCREAMING_SNAKE_CASE__ :Optional[int] = vocab_size
SCREAMING_SNAKE_CASE__ :str = n_ctx
SCREAMING_SNAKE_CASE__ :Optional[int] = n_positions
SCREAMING_SNAKE_CASE__ :Any = n_embd
SCREAMING_SNAKE_CASE__ :Dict = n_layer
SCREAMING_SNAKE_CASE__ :Tuple = n_head
SCREAMING_SNAKE_CASE__ :str = n_inner
SCREAMING_SNAKE_CASE__ :List[str] = rotary_dim
SCREAMING_SNAKE_CASE__ :str = activation_function
SCREAMING_SNAKE_CASE__ :Any = resid_pdrop
SCREAMING_SNAKE_CASE__ :List[Any] = embd_pdrop
SCREAMING_SNAKE_CASE__ :Optional[Any] = attn_pdrop
SCREAMING_SNAKE_CASE__ :Optional[int] = layer_norm_epsilon
SCREAMING_SNAKE_CASE__ :Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE__ :Dict = use_cache
SCREAMING_SNAKE_CASE__ :str = bos_token_id
SCREAMING_SNAKE_CASE__ :Union[str, Any] = eos_token_id
super().__init__(
bos_token_id=__A , eos_token_id=__A , tie_word_embeddings=__A , **__A )
class _SCREAMING_SNAKE_CASE( OnnxConfigWithPast ):
    def __init__( self : Any , config : PretrainedConfig , task : str = "default" , patching_specs : List[PatchingSpec] = None , use_past : bool = False , ) -> Tuple:
super().__init__(__A , task=__A , patching_specs=__A , use_past=__A )
if not getattr(self._config , 'pad_token_id' , __A ):
# TODO: how to do that better?
SCREAMING_SNAKE_CASE__ :Optional[Any] = 0
@property
def __lowerCamelCase ( self : str ) -> Any:
SCREAMING_SNAKE_CASE__ :List[Any] = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(__A , direction='inputs' )
SCREAMING_SNAKE_CASE__ :List[Any] = {0: "batch", 1: "past_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE__ :Optional[int] = {0: "batch", 1: "sequence"}
return common_inputs
@property
def __lowerCamelCase ( self : int ) -> Tuple:
return self._config.n_layer
@property
def __lowerCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
return self._config.n_head
    def __lowerCamelCase ( self : Any , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ) -> int:
SCREAMING_SNAKE_CASE__ :Any = super(__A , self ).generate_dummy_inputs(
__A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A )
# We need to order the input in the way they appears in the forward()
SCREAMING_SNAKE_CASE__ :Tuple = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
SCREAMING_SNAKE_CASE__ :Any = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE__ :List[Any] = seqlen + 2
SCREAMING_SNAKE_CASE__ :Dict = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
SCREAMING_SNAKE_CASE__ :Dict = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers )
]
SCREAMING_SNAKE_CASE__ :Dict = common_inputs["attention_mask"]
if self.use_past:
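            # Extend the attention mask so it also covers the cached past key/value positions.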
SCREAMING_SNAKE_CASE__ :Optional[int] = ordered_inputs["attention_mask"].dtype
SCREAMING_SNAKE_CASE__ :str = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(__A , __A , dtype=__A )] , dim=1 )
return ordered_inputs
@property
def __lowerCamelCase ( self : Tuple ) -> Union[str, Any]:
return 13
| 209 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ : List[Any] = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
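# The heavy vision/torch symbols below are only registered when their optional backends are importable.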
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = ['ConditionalDetrFeatureExtractor']
UpperCAmelCase_ : str = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Any = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class __lowerCAmelCase ( PretrainedConfig):
'''simple docstring'''
__magic_name__ : int = """luke"""
    def __init__( self : Tuple , vocab_size : str=50267 , entity_vocab_size : List[Any]=500000 , hidden_size : int=768 , entity_emb_size : Optional[int]=256 , num_hidden_layers : Optional[Any]=12 , num_attention_heads : Optional[Any]=12 , intermediate_size : Dict=3072 , hidden_act : int="gelu" , hidden_dropout_prob : List[Any]=0.1 , attention_probs_dropout_prob : Tuple=0.1 , max_position_embeddings : Any=512 , type_vocab_size : List[Any]=2 , initializer_range : int=0.02 , layer_norm_eps : Union[str, Any]=1E-12 , use_entity_aware_attention : Any=True , classifier_dropout : Any=None , pad_token_id : Optional[Any]=1 , bos_token_id : Any=0 , eos_token_id : Union[str, Any]=2 , **UpperCamelCase__ : Any , ):
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
A__ : str =vocab_size
A__ : List[str] =entity_vocab_size
A__ : List[Any] =hidden_size
A__ : Dict =entity_emb_size
A__ : str =num_hidden_layers
A__ : str =num_attention_heads
A__ : str =hidden_act
A__ : str =intermediate_size
A__ : Union[str, Any] =hidden_dropout_prob
A__ : int =attention_probs_dropout_prob
A__ : Optional[Any] =max_position_embeddings
A__ : Optional[Any] =type_vocab_size
A__ : Tuple =initializer_range
A__ : int =layer_norm_eps
A__ : Tuple =use_entity_aware_attention
A__ : Dict =classifier_dropout
| 656 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file( tmp_path : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = tmp_path / "file.csv"
_lowerCamelCase : Optional[int] = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def malformed_csv_file( tmp_path : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = tmp_path / "malformed_file.csv"
_lowerCamelCase : Any = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20,\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def csv_file_with_image( tmp_path : Any , image_file : Dict ):
"""simple docstring"""
_lowerCamelCase : int = tmp_path / "csv_with_image.csv"
_lowerCamelCase : int = textwrap.dedent(
        F'\n image\n {image_file}\n ' )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def csv_file_with_label( tmp_path : List[str] ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_label.csv"
_lowerCamelCase : int = textwrap.dedent(
"\\n label\n good\n bad\n good\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def csv_file_with_int_list( tmp_path : Dict ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_int_list.csv"
_lowerCamelCase : Any = textwrap.dedent(
"\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
def A_ ( csv_file : List[Any] , malformed_csv_file : int , caplog : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[Any] = Csv()
_lowerCamelCase : Any = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(_lowerCAmelCase , match="Error tokenizing data" ):
for _ in generator:
pass
assert any(
record.levelname == "ERROR"
and "Failed to read file" in record.message
and os.path.basename(_lowerCAmelCase ) in record.message
for record in caplog.records )
@require_pil
def A_ ( csv_file_with_image : Union[str, Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : Any = f.read().splitlines()[1]
_lowerCamelCase : Optional[Any] = Csv(encoding="utf-8" , features=Features({"image": Image()} ) )
_lowerCamelCase : Union[str, Any] = csv._generate_tables([[csv_file_with_image]] )
_lowerCamelCase : List[str] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("image" ).type == Image()()
_lowerCamelCase : int = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
def A_ ( csv_file_with_label : List[Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : List[Any] = f.read().splitlines()[1:]
_lowerCamelCase : int = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) )
_lowerCamelCase : Tuple = csv._generate_tables([[csv_file_with_label]] )
_lowerCamelCase : int = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )()
_lowerCamelCase : Union[str, Any] = pa_table.to_pydict()["label"]
assert generated_content == [ClassLabel(names=["good", "bad"] ).straint(_lowerCAmelCase ) for label in labels]
def A_ ( csv_file_with_int_list : str ):
"""simple docstring"""
_lowerCamelCase : Dict = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda _lowerCAmelCase : [int(_lowerCAmelCase ) for i in x.split()]} )
_lowerCamelCase : List[Any] = csv._generate_tables([[csv_file_with_int_list]] )
_lowerCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("int_list" ).type )
_lowerCamelCase : Optional[Any] = pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 44 | 0 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader ( yaml.SafeLoader ):
'''simple docstring'''
def _lowerCamelCase ( self : Optional[int] ,UpperCamelCase : int ) -> Dict:
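        # Track constructed keys and fail loudly on duplicates instead of silently overwriting them.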
_lowercase : List[Any] = [self.constructed_objects[key_node] for key_node, _ in node.value]
_lowercase : Optional[int] = [tuple(__A ) if isinstance(__A ,__A ) else key for key in keys]
_lowercase : List[str] = Counter(__A )
_lowercase : List[Any] = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F'''Got duplicate yaml keys: {duplicate_keys}''' )
    def _lowerCamelCase ( self : int ,node : List[Any] ,deep : Tuple=False ) -> Optional[int]:
_lowercase : List[str] = super().construct_mapping(__A ,deep=__A )
self._check_no_duplicates_on_constructed_node(__A )
return mapping
def SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : str) -> Optional[int]:
'''simple docstring'''
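    # Split a README into its leading YAML front-matter (between "---" fences) and the remaining body text.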
_lowercase : Any = list(readme_content.splitlines())
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
_lowercase : List[Any] = full_content[1:].index('---') + 1
_lowercase : Dict = "\n".join(full_content[1:sep_idx])
return yamlblock, "\n".join(full_content[sep_idx + 1 :])
return None, "\n".join(_lowerCAmelCase)
class DatasetMetadata ( dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = {"train_eval_index"} # train-eval-index in the YAML metadata
@classmethod
def _lowerCamelCase ( cls : List[str] ,UpperCamelCase : Path ) -> Any:
with open(__A ,encoding='utf-8' ) as readme_file:
_lowercase : Optional[int] = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(__A )
else:
return cls()
def _lowerCamelCase ( self : str ,UpperCamelCase : Path ) -> int:
if path.exists():
with open(__A ,encoding='utf-8' ) as readme_file:
_lowercase : Tuple = readme_file.read()
else:
_lowercase : Optional[Any] = None
_lowercase : Union[str, Any] = self._to_readme(__A )
with open(__A ,'w' ,encoding='utf-8' ) as readme_file:
readme_file.write(__A )
def _lowerCamelCase ( self : Optional[int] ,UpperCamelCase : Optional[str] = None ) -> Optional[Any]:
if readme_content is not None:
_lowercase : Union[str, Any] = _split_yaml_from_readme(__A )
_lowercase : List[str] = "---\n" + self.to_yaml_string() + "---\n" + content
else:
_lowercase : List[str] = "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
def _lowerCamelCase ( cls : List[Any] ,UpperCamelCase : str ) -> str:
_lowercase : Any = yaml.load(__A ,Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
_lowercase : Optional[Any] = {
(key.replace('-' ,'_' ) if key.replace('-' ,'_' ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**__A )
def _lowerCamelCase ( self : str ) -> Union[str, Any]:
return yaml.safe_dump(
{
(key.replace('_' ,'-' ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} ,sort_keys=__A ,allow_unicode=__A ,encoding='utf-8' ,).decode('utf-8' )
A = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
A = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
ap.add_argument('''readme_filepath''')
A = ap.parse_args()
A = Path(args.readme_filepath)
A = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 125 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase__ ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase_ = IFInpaintingSuperResolutionPipeline
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
lowerCAmelCase_ = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCamelCase_ ( self : List[str] ):
return self._get_superresolution_dummy_components()
  def lowerCamelCase_ ( self : str,device : List[str],seed : List[str]=0 ):
if str(__A ).startswith("mps" ):
_lowerCamelCase : List[str] = torch.manual_seed(__A )
else:
_lowerCamelCase : Optional[int] = torch.Generator(device=__A ).manual_seed(__A )
_lowerCamelCase : List[Any] = floats_tensor((1, 3, 1_6, 1_6),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Any = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Tuple = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Dict = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),reason="XFormers attention is only available with CUDA and `xformers` installed",)
def lowerCamelCase_ ( self : Optional[int] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowerCamelCase_ ( self : Dict ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda",reason="float16 requires CUDA" )
def lowerCamelCase_ ( self : Optional[Any] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
    super().test_save_load_float16(expected_max_diff=1e-1 )
def lowerCamelCase_ ( self : Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCamelCase_ ( self : Dict ):
self._test_save_load_local()
def lowerCamelCase_ ( self : Any ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2,)
| 44 | 0 |
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 548 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset( IterableDataset ):
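    # Buffers raw text from a (possibly streaming) dataset, tokenizes it, and yields fixed-length token blocks.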
    def __init__( self : List[Any],tokenizer : Tuple,dataset : Optional[int],seq_length : Optional[int]=1_0_2_4,num_of_sequences : int=1_0_2_4,chars_per_token : Any=3.6 ):
_lowerCamelCase : List[str] = tokenizer
_lowerCamelCase : Dict = tokenizer.bos_token_id
_lowerCamelCase : Tuple = dataset
_lowerCamelCase : Any = seq_length
_lowerCamelCase : List[Any] = seq_length * chars_per_token * num_of_sequences
def __iter__( self : Tuple ):
_lowerCamelCase : Union[str, Any] = iter(self.dataset )
_lowerCamelCase : str = True
while more_examples:
_lowerCamelCase , _lowerCamelCase : Optional[int] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(__A )["content"] )
buffer_len += len(buffer[-1] )
except StopIteration:
_lowerCamelCase : Tuple = False
break
_lowerCamelCase : int = tokenizer(__A,truncation=__A )["input_ids"]
_lowerCamelCase : int = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0,len(__A ),self.seq_length ):
_lowerCamelCase : List[str] = all_token_ids[i : i + self.seq_length]
if len(__A ) == self.seq_length:
yield torch.tensor(__A )
def create_dataloader( args : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {"streaming": True}
_lowerCamelCase : Optional[Any] = load_dataset(args.dataset_name , split="train" , **_lowerCAmelCase )
_lowerCamelCase : int = ConstantLengthDataset(_lowerCAmelCase , _lowerCAmelCase , seq_length=args.seq_length )
_lowerCamelCase : Dict = DataLoader(_lowerCAmelCase , batch_size=args.batch_size )
return eval_dataloader
def evaluate( args : Optional[Any] ):
"""simple docstring"""
model.eval()
_lowerCamelCase : Optional[int] = []
for step, batch in enumerate(_lowerCAmelCase ):
with torch.no_grad():
_lowerCamelCase : List[str] = model(_lowerCAmelCase , labels=_lowerCAmelCase )
_lowerCamelCase : List[Any] = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(_lowerCAmelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
_lowerCamelCase : Dict = torch.mean(torch.cat(_lowerCAmelCase ) )
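    # Perplexity is exp(mean loss); the exponential can overflow when the loss is very large, hence the guard.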
try:
_lowerCamelCase : List[Any] = torch.exp(_lowerCAmelCase )
except OverflowError:
_lowerCamelCase : Optional[int] = float("inf" )
return loss.item(), perplexity.item()
# Setup Accelerator
UpperCAmelCase_ : List[str] = Accelerator()
# Parse configuration
UpperCAmelCase_ : Tuple = HfArgumentParser(EvaluationArguments)
UpperCAmelCase_ : Dict = parser.parse_args()
set_seed(args.seed)
# Logging
UpperCAmelCase_ : Optional[int] = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
UpperCAmelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
UpperCAmelCase_ : int = create_dataloader(args)
# Prepare everything with our `accelerator`.
UpperCAmelCase_, UpperCAmelCase_ : Dict = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
UpperCAmelCase_, UpperCAmelCase_ : str = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 44 | 0 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
if args.model_type == "bert":
UpperCAmelCase_ : str = BertForMaskedLM.from_pretrained(args.model_name)
UpperCAmelCase_ : Optional[int] = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
UpperCAmelCase_ : str = model.state_dict()
UpperCAmelCase_ : Union[str, Any] = {}
for w in ["word_embeddings", "position_embeddings"]:
UpperCAmelCase_ : Tuple = state_dict[F'{prefix}.embeddings.{w}.weight']
for w in ["weight", "bias"]:
UpperCAmelCase_ : str = state_dict[F'{prefix}.embeddings.LayerNorm.{w}']
UpperCAmelCase_ : Tuple = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
UpperCAmelCase_ : str = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'
]
UpperCAmelCase_ : int = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'
]
UpperCAmelCase_ : Tuple = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'
]
UpperCAmelCase_ : int = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'
]
UpperCAmelCase_ : List[Any] = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'
]
UpperCAmelCase_ : List[str] = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'
]
UpperCAmelCase_ : Optional[int] = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'
]
UpperCAmelCase_ : Optional[Any] = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'
]
std_idx += 1
UpperCAmelCase_ : str = state_dict['cls.predictions.decoder.weight']
UpperCAmelCase_ : List[str] = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCAmelCase_ : Any = state_dict[F'cls.predictions.transform.dense.{w}']
UpperCAmelCase_ : List[Any] = state_dict[F'cls.predictions.transform.LayerNorm.{w}']
print(F'N layers selected for distillation: {std_idx}')
print(F'Number of params transferred for distillation: {len(compressed_sd.keys())}')
print(F'Save transferred checkpoint to {args.dump_checkpoint}.')
torch.save(compressed_sd, args.dump_checkpoint)
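    # Illustrative follow-up sketch (not part of the original script): the saved
    # state dict can be loaded into a DistilBERT student whose default config
    # (6 layers, hidden size 768) matches the layers extracted above.
    from transformers import DistilBertConfig, DistilBertForMaskedLM

    student = DistilBertForMaskedLM(DistilBertConfig())
    student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)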
| 570 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'allenai/led-base-16384': 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.'
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.'
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
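if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module); requires
    # network access to fetch the named checkpoint.
    tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    enc = tok(["short text", "a slightly longer example"])
    # Mark the first token of each sequence for global attention.
    enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
    # Padded positions of `global_attention_mask` are filled with -1 by `_pad` above.
    print(tok.pad(enc, padding="longest")["global_attention_mask"])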
| 44 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
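# Illustrative note (not part of the original file): swapping the module object
# in sys.modules for a _LazyModule defers the torch-heavy imports, e.g.
#
#     import transformers.models.biogpt as biogpt  # cheap: nothing heavy imported yet
#     config = biogpt.BioGptConfig()               # first attribute access triggers the import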
| 112 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    """List the (old, new) weight names that only need renaming."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCamelCase : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused timm qkv projection into separate query/key/value weights."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the timm classification head, which has no counterpart in the base model."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move the value stored under `old` to `new`."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Load the standard COCO image of two cats used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm ViT checkpoint into the HF ViT structure."""
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {vit_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
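    # Illustrative check (not part of the original script): the qkv handling in
    # read_in_q_k_v is just three equal slices of the fused projection matrix.
    # With a toy hidden size of 4, a fused (3*4, 4) weight splits into three (4, 4) blocks.
    fused = torch.arange(48.0).reshape(12, 4)
    q, k, v = fused[:4, :], fused[4:8, :], fused[-4:, :]
    assert q.shape == k.shape == v.shape == (4, 4)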
| 44 | 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '\\n\n'
_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n        This includes models such as gpt2, causal variations of bert,\n        causal versions of t5, and more (the full list can be found\n        in the AutoModelForCausalLM documentation here:\n        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        78.22\n        >>> print(round(results["perplexities"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = datasets.load_dataset("wikitext",\n        ...                                     "wikitext-2-raw-v1",\n        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!=\'\']\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        60.35\n        >>> print(round(results["perplexities"][0], 2))\n        81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'input_texts': datasets.Value('string'),
                }
            ),
            reference_urls=['https://huggingface.co/docs/transformers/perplexity'],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors='pt',
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction='none')

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # masked mean of the per-token cross-entropy, exponentiated per sequence
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 596 |
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    """Return True if `n` is a pentagonal number, i.e. n = m(3m - 1) / 2 for some integer m."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Project Euler 44: find the smallest difference between a pair of
    pentagonal numbers whose sum and difference are both pentagonal."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
| 44 | 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no counterpart in the HF model."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    """Rename fairseq parameter names to their HF equivalents."""
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    """Build an output projection layer that shares the embedding weights."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq Speech2Text checkpoint to the transformers format."""
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    args = checkpoint["args"]
    state_dict = checkpoint["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(',')]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function='relu',
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )
    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
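    # Illustrative round-trip check (not part of the original script): reload the
    # checkpoint just written to verify it deserializes cleanly.
    reloaded = Speech2TextForConditionalGeneration.from_pretrained(args.pytorch_dump_folder_path)
    print(f"Reloaded model with {sum(p.numel() for p in reloaded.parameters()):,} parameters")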
| 262 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44 | 0 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    """Tests for the PNDM scheduler (Runge-Kutta warm-up + linear multistep)."""

    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
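if __name__ == "__main__":
    # Illustrative usage sketch (not part of the test file): PNDM runs Runge-Kutta
    # (PRK) warm-up steps followed by linear multistep (PLMS) steps; `step` below
    # dispatches between the two, and random tensors stand in for a real UNet.
    scheduler = PNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        residual = torch.randn(1, 3, 8, 8)  # stands in for the model's noise prediction
        sample = scheduler.step(residual, t, sample).prev_sample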
| 321 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
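    # Illustrative check (not part of the original file): by the Bernstein form
    # B(t) = (1-t)^2 P0 + 2t(1-t) P1 + t^2 P2, the degree-2 curve through
    # (0,0), (5,5), (5,0) passes through (3.75, 2.5) at t = 0.5.
    x, y = BezierCurve([(0, 0), (5, 5), (5, 0)]).bezier_curve_function(0.5)
    assert abs(x - 3.75) < 1e-9 and abs(y - 2.5) < 1e-9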
| 44 | 0 |