| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82 – 54.1k) | int64 (0 – 699) | string (lengths 111 – 35.6k) | int64 (0 – 699) | int64 (0 – 1) |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
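# Note: the block above follows the standard Transformers lazy-import layout:
# `_import_structure` declares the public symbols per submodule, the TYPE_CHECKING
# branch gives static type checkers real imports, and `_LazyModule` defers the
# actual imports to first attribute access so that importing the package stays cheap.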
--- code_codestyle: 10 ---

import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process

logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
                results.update(result)
    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
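# Illustrative invocation (task, model and paths are placeholders, not from the dump;
# the flags map to the dataclass fields defined above):
#     python run_multiple_choice.py --task_name swag --model_name_or_path bert-base-uncased \
#         --data_dir ./data/swag --output_dir ./swag_output --do_train --do_eval \
#         --max_seq_length 128 --overwrite_output_dir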
--- style_context_codestyle: 10, label: 1 ---
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
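# A minimal driving sketch, assuming IIRFilter exposes process(sample: float) -> float
# as in the TheAlgorithms audio_filters package this module imports from:
if __name__ == "__main__":
    lowpass = make_lowpass(440, 48_000)
    # Print the first few samples of the impulse response of a 440 Hz low-pass at 48 kHz.
    print([round(lowpass.process(1.0 if i == 0 else 0.0), 6) for i in range(8)])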
--- code_codestyle: 10 ---

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
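# Sketch: attribute_map aliases generic names onto the decoder-specific fields, so
# with the defaults above TrOCRConfig().hidden_size == TrOCRConfig().d_model == 1024.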
--- style_context_codestyle: 10, label: 1 ---
from __future__ import annotations

COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
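# Worked example: for two 1 C charges 1 m apart,
# coulombs_law(force=0, charge1=1, charge2=1, distance=1) returns
# {'force': 8988000000.0}, i.e. F = k * |q1 * q2| / d**2.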
--- code_codestyle: 10 ---

import os
import tempfile
import unittest

from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        FlaubertForMultipleChoice,
        FlaubertForQuestionAnswering,
        FlaubertForQuestionAnsweringSimple,
        FlaubertForSequenceClassification,
        FlaubertForTokenClassification,
        FlaubertModel,
        FlaubertWithLMHeadModel,
    )
    from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class FlaubertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
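# These suites are collected by pytest inside a Transformers checkout, e.g.
# (path illustrative): python -m pytest tests/models/flaubert/test_modeling_flaubert.py -k flaubert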
--- style_context_codestyle: 10, label: 1 ---
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class VisionTextDualEncoderProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
--- code_codestyle: 10 ---

from __future__ import annotations

import unittest

import numpy as np

from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


if is_tf_available():
    import tensorflow as tf

    from transformers import TFBlipTextModel
    from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST


class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
--- style_context_codestyle: 10, label: 1 ---
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because Lightning requires a forward method
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
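# Illustrative invocation (script name and paths are placeholders; the flags are the
# three arguments declared above):
#     python convert_longformer_qa_checkpoint.py \
#         --longformer_model longformer-base-4096 \
#         --longformer_question_answering_ckpt_path ./checkpoints/longformer_qa.ckpt \
#         --pytorch_dump_folder_path ./longformer-base-4096-finetuned-qa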
--- code_codestyle: 10 ---

from __future__ import annotations

sieve = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if sieve[i]:
        for j in range(i * i, 1_000_001, i):
            sieve[j] = False
    i += 1


def is_prime(n: int) -> bool:
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1000000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    return len(find_circular_primes())


if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
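# Sanity check (Project Euler 35): there are 55 circular primes below one million,
# so solution() == 55; below 100 the list is [2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, 97].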
--- style_context_codestyle: 10, label: 1 ---
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig


if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
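# Sketch usage (the concrete encoder/decoder pair is illustrative):
#     from transformers import TrOCRConfig, ViTConfig
#     config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), TrOCRConfig())
#     assert config.decoder.is_decoder and config.decoder.add_cross_attention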
--- code_codestyle: 10 ---

import unittest

from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass

    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_UpperCamelCase = {'''input_ids''': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = _UpperCamelCase
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
--- style_context_codestyle: 10, label: 1 ---
def catalan_number(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1

    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
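# Worked example: the sequence runs 1, 1, 2, 5, 14, ... under the 1-based indexing
# used here, so catalan_number(5) == 14.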
| 10 | import sys
from collections import defaultdict
class lowerCAmelCase_ :
def __init__( self : Optional[int] ):
_UpperCamelCase = []
def UpperCamelCase_ ( self : Any , _A : str ):
return self.node_position[vertex]
def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] , _A : Union[str, Any] ):
_UpperCamelCase = pos
def UpperCamelCase_ ( self : Any , _A : List[str] , _A : int , _A : Optional[Any] , _A : Union[str, Any] ):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
_UpperCamelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
_UpperCamelCase = 2 * start + 1
else:
_UpperCamelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
_UpperCamelCase , _UpperCamelCase = heap[smallest_child], positions[smallest_child]
_UpperCamelCase , _UpperCamelCase = (
heap[start],
positions[start],
)
_UpperCamelCase , _UpperCamelCase = temp, tempa
_UpperCamelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , _A )
self.top_to_bottom(_A , _A , _A , _A )
def UpperCamelCase_ ( self : List[str] , _A : Tuple , _A : Optional[Any] , _A : int , _A : Optional[int] ):
_UpperCamelCase = position[index]
while index != 0:
_UpperCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
_UpperCamelCase = heap[parent]
_UpperCamelCase = position[parent]
self.set_position(position[parent] , _A )
else:
_UpperCamelCase = val
_UpperCamelCase = temp
self.set_position(_A , _A )
break
_UpperCamelCase = parent
else:
_UpperCamelCase = val
_UpperCamelCase = temp
self.set_position(_A , 0 )
def UpperCamelCase_ ( self : int , _A : Tuple , _A : int ):
_UpperCamelCase = len(_A ) // 2 - 1
for i in range(_A , -1 , -1 ):
self.top_to_bottom(_A , _A , len(_A ) , _A )
def UpperCamelCase_ ( self : Any , _A : int , _A : List[str] ):
_UpperCamelCase = positions[0]
_UpperCamelCase = sys.maxsize
self.top_to_bottom(_A , 0 , len(_A ) , _A )
return temp
def _snake_case ( __snake_case ):
_UpperCamelCase = Heap()
_UpperCamelCase = [0] * len(__snake_case )
_UpperCamelCase = [-1] * len(__snake_case ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
_UpperCamelCase = [] # Heap of Distance of vertices from their neighboring vertex
_UpperCamelCase = []
for vertex in range(len(__snake_case ) ):
distance_tv.append(sys.maxsize )
positions.append(__snake_case )
heap.node_position.append(__snake_case )
_UpperCamelCase = []
_UpperCamelCase = 1
_UpperCamelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
_UpperCamelCase = 0
_UpperCamelCase = distance
heap.heapify(__snake_case , __snake_case )
for _ in range(1 , len(__snake_case ) ):
_UpperCamelCase = heap.delete_minimum(__snake_case , __snake_case )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
_UpperCamelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(__snake_case )]
):
_UpperCamelCase = distance
heap.bottom_to_top(
__snake_case , heap.get_position(__snake_case ) , __snake_case , __snake_case )
_UpperCamelCase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prim's Algorithm --------- >
_lowerCAmelCase = int(input("Enter number of edges: ").strip())
_lowerCAmelCase = defaultdict(list)
for _ in range(edges_number):
_lowerCAmelCase = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prims_algorithm(adjacency_list))
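# Hedged usage sketch (hypothetical: `prims_algorithm` is assumed to be the
# de-obfuscated name of the MST routine defined above). For the triangle graph
# 0-1 (w=1), 1-2 (w=2), 0-2 (w=3), Prim's algorithm rooted at vertex 0 keeps
# the two cheapest edges:
def _example_prims_usage():
    graph = defaultdict(list)
    for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
        graph[u].append([v, w])
        graph[v].append([u, w])
    # Tree edges are returned as (parent, child) tuples.
    assert sorted(prims_algorithm(graph)) == [(0, 1), (1, 2)]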
import math
def _snake_case ( __snake_case = 100 ):
_UpperCamelCase = sum(i * i for i in range(1 , n + 1 ) )
_UpperCamelCase = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'{solution() = }')
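# Hedged worked example (assuming `solution` is the de-obfuscated name used in the
# __main__ block above): for n = 10 the square of the sum is 55**2 = 3025 and the
# sum of the squares is 385, so solution(10) == 2640; the default n = 100 gives
# the Project Euler #6 answer, 25164150.
def _example_sum_square_difference():
    assert solution(10) == 2640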
import logging
import os
from .state import PartialState
class lowerCAmelCase_ ( logging.LoggerAdapter ):
@staticmethod
def UpperCamelCase_ ( _A : Any ):
_UpperCamelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[Any] , _A : str , *_A : int , **_A : List[Any] ):
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
_UpperCamelCase = kwargs.pop('''main_process_only''' , _A )
_UpperCamelCase = kwargs.pop('''in_order''' , _A )
if self.isEnabledFor(_A ):
if self._should_log(_A ):
_UpperCamelCase , _UpperCamelCase = self.process(_A , _A )
self.logger.log(_A , _A , *_A , **_A )
elif in_order:
_UpperCamelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_UpperCamelCase , _UpperCamelCase = self.process(_A , _A )
self.logger.log(_A , _A , *_A , **_A )
state.wait_for_everyone()
def _snake_case ( __snake_case , __snake_case = None ):
if log_level is None:
_UpperCamelCase = os.environ.get('''ACCELERATE_LOG_LEVEL''' , __snake_case )
_UpperCamelCase = logging.getLogger(__snake_case )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(__snake_case , {} )
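# Hedged usage sketch (commented out because it needs an initialized accelerate
# state; `get_logger` is assumed to be the de-obfuscated name of the factory above):
#
# from accelerate import Accelerator
# accelerator = Accelerator()  # initializes PartialState for the adapter
# logger = get_logger(__name__, log_level="INFO")
# logger.info("emitted once, on the main process only")
# logger.info("emitted by every rank, in order", main_process_only=False, in_order=True)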
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def _snake_case ( __snake_case ):
random.seed(__snake_case )
np.random.seed(__snake_case )
torch.manual_seed(__snake_case )
torch.cuda.manual_seed_all(__snake_case )
# ^^ safe to call this function even if cuda is not available
class lowerCAmelCase_ :
def __init__( self : Tuple , _A : Iterable[torch.nn.Parameter] , _A : float = 0.9999 , _A : float = 0.0 , _A : int = 0 , _A : bool = False , _A : Union[float, int] = 1.0 , _A : Union[float, int] = 2 / 3 , _A : Optional[Any] = None , _A : Dict[str, Any] = None , **_A : Dict , ):
if isinstance(_A , torch.nn.Module ):
_UpperCamelCase = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , _A , standard_warn=_A , )
_UpperCamelCase = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_UpperCamelCase = True
if kwargs.get('''max_value''' , _A ) is not None:
_UpperCamelCase = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
deprecate('''max_value''' , '''1.0.0''' , _A , standard_warn=_A )
_UpperCamelCase = kwargs['''max_value''']
if kwargs.get('''min_value''' , _A ) is not None:
_UpperCamelCase = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
deprecate('''min_value''' , '''1.0.0''' , _A , standard_warn=_A )
_UpperCamelCase = kwargs['''min_value''']
_UpperCamelCase = list(_A )
_UpperCamelCase = [p.clone().detach() for p in parameters]
if kwargs.get('''device''' , _A ) is not None:
_UpperCamelCase = '''The `device` argument is deprecated. Please use `to` instead.'''
deprecate('''device''' , '''1.0.0''' , _A , standard_warn=_A )
self.to(device=kwargs['''device'''] )
_UpperCamelCase = None
_UpperCamelCase = decay
_UpperCamelCase = min_decay
_UpperCamelCase = update_after_step
_UpperCamelCase = use_ema_warmup
_UpperCamelCase = inv_gamma
_UpperCamelCase = power
_UpperCamelCase = 0
_UpperCamelCase = None # set in `step()`
_UpperCamelCase = model_cls
_UpperCamelCase = model_config
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , _A : Any , _A : str ):
_UpperCamelCase , _UpperCamelCase = model_cls.load_config(_A , return_unused_kwargs=_A )
_UpperCamelCase = model_cls.from_pretrained(_A )
_UpperCamelCase = cls(model.parameters() , model_cls=_A , model_config=model.config )
ema_model.load_state_dict(_A )
return ema_model
def UpperCamelCase_ ( self : Any , _A : str ):
if self.model_cls is None:
raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
if self.model_config is None:
raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
_UpperCamelCase = self.model_cls.from_config(self.model_config )
_UpperCamelCase = self.state_dict()
state_dict.pop('''shadow_params''' , _A )
model.register_to_config(**_A )
self.copy_to(model.parameters() )
model.save_pretrained(_A )
def UpperCamelCase_ ( self : Optional[int] , _A : int ):
_UpperCamelCase = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_UpperCamelCase = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_UpperCamelCase = (1 + step) / (10 + step)
_UpperCamelCase = min(_A , self.decay )
# make sure decay is not smaller than min_decay
_UpperCamelCase = max(_A , self.min_decay )
return cur_decay_value
@torch.no_grad()
def UpperCamelCase_ ( self : Union[str, Any] , _A : Iterable[torch.nn.Parameter] ):
if isinstance(_A , torch.nn.Module ):
_UpperCamelCase = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , _A , standard_warn=_A , )
_UpperCamelCase = parameters.parameters()
_UpperCamelCase = list(_A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_UpperCamelCase = self.get_decay(self.optimization_step )
_UpperCamelCase = decay
_UpperCamelCase = 1 - decay
_UpperCamelCase = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , _A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_UpperCamelCase = deepspeed.zero.GatheredParameters(_A , modifier_rank=_A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(_A )
def UpperCamelCase_ ( self : str , _A : Iterable[torch.nn.Parameter] ):
_UpperCamelCase = list(_A )
for s_param, param in zip(self.shadow_params , _A ):
param.data.copy_(s_param.to(param.device ).data )
def UpperCamelCase_ ( self : Dict , _A : Optional[Any]=None , _A : Dict=None ):
_UpperCamelCase = [
p.to(device=_A , dtype=_A ) if p.is_floating_point() else p.to(device=_A )
for p in self.shadow_params
]
def UpperCamelCase_ ( self : Any ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def UpperCamelCase_ ( self : List[Any] , _A : Iterable[torch.nn.Parameter] ):
_UpperCamelCase = [param.detach().cpu().clone() for param in parameters]
def UpperCamelCase_ ( self : str , _A : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
for c_param, param in zip(self.temp_stored_params , _A ):
param.data.copy_(c_param.data )
# Better memory-wise.
_UpperCamelCase = None
def UpperCamelCase_ ( self : Any , _A : dict ):
_UpperCamelCase = copy.deepcopy(_A )
_UpperCamelCase = state_dict.get('''decay''' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('''Decay must be between 0 and 1''' )
_UpperCamelCase = state_dict.get('''min_decay''' , self.min_decay )
if not isinstance(self.min_decay , _A ):
raise ValueError('''Invalid min_decay''' )
_UpperCamelCase = state_dict.get('''optimization_step''' , self.optimization_step )
if not isinstance(self.optimization_step , _A ):
raise ValueError('''Invalid optimization_step''' )
_UpperCamelCase = state_dict.get('''update_after_step''' , self.update_after_step )
if not isinstance(self.update_after_step , _A ):
raise ValueError('''Invalid update_after_step''' )
_UpperCamelCase = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , _A ):
raise ValueError('''Invalid use_ema_warmup''' )
_UpperCamelCase = state_dict.get('''inv_gamma''' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('''Invalid inv_gamma''' )
_UpperCamelCase = state_dict.get('''power''' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('''Invalid power''' )
_UpperCamelCase = state_dict.get('''shadow_params''' , _A )
if shadow_params is not None:
_UpperCamelCase = shadow_params
if not isinstance(self.shadow_params , _A ):
raise ValueError('''shadow_params must be a list''' )
if not all(isinstance(_A , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('''shadow_params must all be Tensors''' )
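# Hedged usage sketch (hypothetical model; the method names `step`, `store`,
# `copy_to`, and `restore` are assumed from the un-obfuscated original, while the
# class is instantiated under its obfuscated name here):
def _example_ema_usage():
    model = torch.nn.Linear(4, 4)
    ema = lowerCAmelCase_(model.parameters(), decay=0.9999)
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
    for _ in range(3):
        model(torch.randn(2, 4)).sum().backward()
        optimizer.step()
        optimizer.zero_grad()
        ema.step(model.parameters())  # refresh the shadow (EMA) weights
    ema.store(model.parameters())     # stash the live weights
    ema.copy_to(model.parameters())   # evaluate with the EMA weights
    ema.restore(model.parameters())   # put the live weights back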
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = "▁"
_lowerCAmelCase = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = BertGenerationTokenizer
UpperCAmelCase = False
UpperCAmelCase = True
def UpperCamelCase_ ( self : List[str] ):
super().setUp()
_UpperCamelCase = BertGenerationTokenizer(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = '''<s>'''
_UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(_A ) , 1002 )
def UpperCamelCase_ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = BertGenerationTokenizer(_A , keep_accents=_A )
_UpperCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [285, 46, 10, 170, 382] , )
_UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_UpperCamelCase = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def UpperCamelCase_ ( self : Union[str, Any] ):
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''Hello World!'''
_UpperCamelCase = [1_8536, 2260, 101]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
_UpperCamelCase = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def UpperCamelCase_ ( self : Dict ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
_UpperCamelCase = list(self.big_tokenizer.get_vocab().keys() )[:10]
_UpperCamelCase = ''' '''.join(_A )
_UpperCamelCase = self.big_tokenizer.encode_plus(_A , return_tensors='''pt''' , return_token_type_ids=_A )
_UpperCamelCase = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_A )
_UpperCamelCase = BertGenerationConfig()
_UpperCamelCase = BertGenerationEncoder(_A )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_A )
model(**_A )
@slow
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = {'''input_ids''': [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
_lowerCAmelCase = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
_lowerCAmelCase = logging.WARNING
def _snake_case ( ):
_UpperCamelCase = os.getenv('''DATASETS_VERBOSITY''' , __snake_case )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
f"""has to be one of: { ", ".join(log_levels.keys() ) }""" )
return _default_log_level
def _snake_case ( ):
return __name__.split('''.''' )[0]
def _snake_case ( ):
return logging.getLogger(_get_library_name() )
def _snake_case ( ):
# Apply our default configuration to the library root logger.
_UpperCamelCase = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def _snake_case ( ):
_UpperCamelCase = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def _snake_case ( __snake_case = None ):
if name is None:
_UpperCamelCase = _get_library_name()
return logging.getLogger(__snake_case )
def _snake_case ( ):
return _get_library_root_logger().getEffectiveLevel()
def _snake_case ( __snake_case ):
_get_library_root_logger().setLevel(__snake_case )
def _snake_case ( ):
return set_verbosity(__snake_case )
def _snake_case ( ):
return set_verbosity(__snake_case )
def _snake_case ( ):
return set_verbosity(__snake_case )
def _snake_case ( ):
return set_verbosity(__snake_case )
def _snake_case ( ):
_UpperCamelCase = False
def _snake_case ( ):
_UpperCamelCase = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class lowerCAmelCase_ :
def __init__( self : Any , *_A : Union[str, Any] , **_A : str ): # pylint: disable=unused-argument
_UpperCamelCase = args[0] if args else None
def __iter__( self : Optional[Any] ):
return iter(self._iterator )
def __getattr__( self : Any , _A : Optional[int] ):
def empty_fn(*_A : List[Any] , **_A : List[str] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : str ):
return self
def __exit__( self : str , _A : List[str] , _A : Dict , _A : str ):
return
_lowerCAmelCase = True
class lowerCAmelCase_ :
def __call__( self : int , *_A : Union[str, Any] , _A : Optional[int]=False , **_A : Dict ):
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*_A , **_A )
else:
return EmptyTqdm(*_A , **_A )
def UpperCamelCase_ ( self : int , *_A : str , **_A : Optional[int] ):
_UpperCamelCase = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_A , **_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
_lowerCAmelCase = _tqdm_cls()
def _snake_case ( ):
global _tqdm_active
return bool(_tqdm_active )
def _snake_case ( ):
global _tqdm_active
_UpperCamelCase = True
def _snake_case ( ):
global _tqdm_active
_UpperCamelCase = False
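# Hedged usage sketch (commented out; public names assumed from the un-obfuscated
# `datasets.utils.logging` module):
#
# import datasets.utils.logging as ds_logging
# ds_logging.set_verbosity(ds_logging.INFO)   # or export DATASETS_VERBOSITY=info
# ds_logging.disable_progress_bar()           # tqdm(...) now returns EmptyTqdm
# assert not ds_logging.is_progress_bar_enabled()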
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCAmelCase_ ( __lowercase, __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = StableUnCLIPPipeline
UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
UpperCAmelCase = False
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = 32
_UpperCamelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=_A , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_A , num_layers=1 , )
torch.manual_seed(0 )
_UpperCamelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=_A , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
_UpperCamelCase = StableUnCLIPImageNormalizer(embedding_dim=_A )
_UpperCamelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_A , layers_per_block=1 , upcast_attention=_A , use_linear_projection=_A , )
torch.manual_seed(0 )
_UpperCamelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_A , steps_offset=1 , )
torch.manual_seed(0 )
_UpperCamelCase = AutoencoderKL()
_UpperCamelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Dict=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_A )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_A )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
_UpperCamelCase = pipe('''anime turtle''' , generator=_A , output_type='''np''' )
_UpperCamelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
_UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = "▁"
_lowerCAmelCase = {"vocab_file": "sentencepiece.bpe.model"}
_lowerCAmelCase = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
_lowerCAmelCase = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self : int , _A : Optional[Any] , _A : Union[str, Any]="<s>" , _A : List[Any]="</s>" , _A : Tuple="</s>" , _A : List[str]="<s>" , _A : Optional[int]="<unk>" , _A : Optional[Any]="<pad>" , _A : Optional[Any]="<mask>" , _A : Optional[Dict[str, Any]] = None , **_A : List[str] , ):
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
_UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_A ) )
_UpperCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
_UpperCamelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCamelCase = 1
_UpperCamelCase = len(self.sp_model ) + self.fairseq_offset
_UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : List[str] ):
_UpperCamelCase = self.__dict__.copy()
_UpperCamelCase = None
_UpperCamelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Tuple , _A : Optional[int] ):
_UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCamelCase = {}
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCamelCase_ ( self : Dict , _A : List[int] , _A : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
_UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase_ ( self : Dict , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def UpperCamelCase_ ( self : Any , _A : List[int] , _A : Optional[List[int]] = None ):
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase_ ( self : int ):
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase_ ( self : Optional[int] , _A : str ):
return self.sp_model.encode(_A , out_type=_A )
def UpperCamelCase_ ( self : Dict , _A : Optional[Any] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCamelCase = self.sp_model.PieceToId(_A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase_ ( self : Dict , _A : str ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase_ ( self : Any , _A : int ):
_UpperCamelCase = ''''''.join(_A ).replace(_A , ''' ''' ).strip()
return out_string
def UpperCamelCase_ ( self : Optional[int] , _A : str , _A : Optional[str] = None ):
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCamelCase = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _A )
elif not os.path.isfile(self.vocab_file ):
with open(_A , '''wb''' ) as fi:
_UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
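# Hedged illustration of the fairseq-offset bookkeeping above (no new behavior):
# sentencepiece id 3 — the first "real" piece, "," — maps to fairseq id 3 + 1 = 4,
# ids 0-3 stay pinned to <s>/<pad>/</s>/<unk>, and <mask> is appended at the end,
# which is why vocab_size == len(sp_model) + fairseq_offset + 1.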
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def _snake_case ( __snake_case , __snake_case ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(__snake_case , __snake_case ) ) )
def _snake_case ( __snake_case , __snake_case ):
if dataset.ndim != value_array.ndim:
_UpperCamelCase = (
'''Wrong input data\'s dimensions... '''
f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(__snake_case )
try:
if dataset.shape[1] != value_array.shape[1]:
_UpperCamelCase = (
'''Wrong input data\'s shape... '''
f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(__snake_case )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('''Wrong shape''' )
if dataset.dtype != value_array.dtype:
_UpperCamelCase = (
'''Input data have different datatype... '''
f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(__snake_case )
_UpperCamelCase = []
for value in value_array:
_UpperCamelCase = euclidean(__snake_case , dataset[0] )
_UpperCamelCase = dataset[0].tolist()
for dataset_value in dataset[1:]:
_UpperCamelCase = euclidean(__snake_case , __snake_case )
if dist > temp_dist:
_UpperCamelCase = temp_dist
_UpperCamelCase = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def _snake_case ( __snake_case , __snake_case ):
return np.dot(__snake_case , __snake_case ) / (norm(__snake_case ) * norm(__snake_case ))
if __name__ == "__main__":
import doctest
doctest.testmod()
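# Hedged usage sketch (commented out; `similarity_search` and `cosine_similarity`
# are assumed to be the de-obfuscated names of the two functions above):
#
# dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
# value_array = np.array([[0.1, 0.1]])
# similarity_search(dataset, value_array)  # -> [[[0.0, 0.0], 0.14142...]]
# cosine_similarity(np.array([1.0, 1.0]), np.array([2.0, 2.0]))  # -> ~1.0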
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _snake_case ( __snake_case , __snake_case , __snake_case ):
def get_masked_lm_array(__snake_case ):
_UpperCamelCase = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
_UpperCamelCase = tf.train.load_variable(__snake_case , __snake_case )
if "kernel" in name:
_UpperCamelCase = array.transpose()
return torch.from_numpy(__snake_case )
def get_encoder_array(__snake_case ):
_UpperCamelCase = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
_UpperCamelCase = tf.train.load_variable(__snake_case , __snake_case )
if "kernel" in name:
_UpperCamelCase = array.transpose()
return torch.from_numpy(__snake_case )
def get_encoder_layer_array(__snake_case , __snake_case ):
_UpperCamelCase = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
_UpperCamelCase = tf.train.load_variable(__snake_case , __snake_case )
if "kernel" in name:
_UpperCamelCase = array.transpose()
return torch.from_numpy(__snake_case )
def get_encoder_attention_layer_array(__snake_case , __snake_case , __snake_case ):
_UpperCamelCase = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
_UpperCamelCase = tf.train.load_variable(__snake_case , __snake_case )
_UpperCamelCase = array.reshape(__snake_case )
if "kernel" in name:
_UpperCamelCase = array.transpose()
return torch.from_numpy(__snake_case )
print(f"""Loading model based on config from {config_path}...""" )
_UpperCamelCase = BertConfig.from_json_file(__snake_case )
_UpperCamelCase = BertForMaskedLM(__snake_case )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
_UpperCamelCase = model.bert.encoder.layer[layer_index]
# Self-attention
_UpperCamelCase = layer.attention.self
_UpperCamelCase = get_encoder_attention_layer_array(
__snake_case , '''_query_dense/kernel''' , self_attn.query.weight.data.shape )
_UpperCamelCase = get_encoder_attention_layer_array(
__snake_case , '''_query_dense/bias''' , self_attn.query.bias.data.shape )
_UpperCamelCase = get_encoder_attention_layer_array(
__snake_case , '''_key_dense/kernel''' , self_attn.key.weight.data.shape )
_UpperCamelCase = get_encoder_attention_layer_array(
__snake_case , '''_key_dense/bias''' , self_attn.key.bias.data.shape )
_UpperCamelCase = get_encoder_attention_layer_array(
__snake_case , '''_value_dense/kernel''' , self_attn.value.weight.data.shape )
_UpperCamelCase = get_encoder_attention_layer_array(
__snake_case , '''_value_dense/bias''' , self_attn.value.bias.data.shape )
# Self-attention Output
_UpperCamelCase = layer.attention.output
_UpperCamelCase = get_encoder_attention_layer_array(
__snake_case , '''_output_dense/kernel''' , self_output.dense.weight.data.shape )
_UpperCamelCase = get_encoder_attention_layer_array(
__snake_case , '''_output_dense/bias''' , self_output.dense.bias.data.shape )
_UpperCamelCase = get_encoder_layer_array(__snake_case , '''_attention_layer_norm/gamma''' )
_UpperCamelCase = get_encoder_layer_array(__snake_case , '''_attention_layer_norm/beta''' )
# Intermediate
_UpperCamelCase = layer.intermediate
_UpperCamelCase = get_encoder_layer_array(__snake_case , '''_intermediate_dense/kernel''' )
_UpperCamelCase = get_encoder_layer_array(__snake_case , '''_intermediate_dense/bias''' )
# Output
_UpperCamelCase = layer.output
_UpperCamelCase = get_encoder_layer_array(__snake_case , '''_output_dense/kernel''' )
_UpperCamelCase = get_encoder_layer_array(__snake_case , '''_output_dense/bias''' )
_UpperCamelCase = get_encoder_layer_array(__snake_case , '''_output_layer_norm/gamma''' )
_UpperCamelCase = get_encoder_layer_array(__snake_case , '''_output_layer_norm/beta''' )
# Embeddings
_UpperCamelCase = get_encoder_array('''_position_embedding_layer/embeddings''' )
_UpperCamelCase = get_encoder_array('''_type_embedding_layer/embeddings''' )
_UpperCamelCase = get_encoder_array('''_embedding_norm_layer/gamma''' )
_UpperCamelCase = get_encoder_array('''_embedding_norm_layer/beta''' )
# LM Head
_UpperCamelCase = model.cls.predictions.transform
_UpperCamelCase = get_masked_lm_array('''dense/kernel''' )
_UpperCamelCase = get_masked_lm_array('''dense/bias''' )
_UpperCamelCase = get_masked_lm_array('''layer_norm/gamma''' )
_UpperCamelCase = get_masked_lm_array('''layer_norm/beta''' )
_UpperCamelCase = get_masked_lm_array('''embedding_table''' )
# Pooling
_UpperCamelCase = BertPooler(config=__snake_case )
_UpperCamelCase = get_encoder_array('''_pooler_layer/kernel''' )
_UpperCamelCase = get_encoder_array('''_pooler_layer/bias''' )
# Export final model
model.save_pretrained(__snake_case )
# Integration test - should load without any errors ;)
_UpperCamelCase = BertForMaskedLM.from_pretrained(__snake_case )
print(new_model.eval() )
print('''Model conversion was done successfully!''' )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
_lowerCAmelCase = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
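# Hedged invocation sketch (hypothetical script name and paths; the flags match the
# argparse definitions above):
#
# python convert_token_dropping_checkpoint.py \
#     --tf_checkpoint_path /path/to/tf_ckpt \
#     --bert_config_file /path/to/bert_config.json \
#     --pytorch_dump_path /path/to/pytorch_dump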
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = ShapEPipeline
UpperCAmelCase = ["prompt"]
UpperCAmelCase = ["prompt"]
UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
UpperCAmelCase = False
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
return 32
@property
def UpperCamelCase_ ( self : int ):
return 32
@property
def UpperCamelCase_ ( self : List[str] ):
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return 8
@property
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCamelCase_ ( self : List[Any] ):
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCamelCase_ ( self : int ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
_UpperCamelCase = PriorTransformer(**_A )
return model
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
_UpperCamelCase = ShapERenderer(**_A )
return model
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.dummy_prior
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = self.dummy_tokenizer
_UpperCamelCase = self.dummy_renderer
_UpperCamelCase = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , )
_UpperCamelCase = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase_ ( self : Tuple , _A : Tuple , _A : Optional[int]=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = '''cpu'''
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = pipe(**self.get_dummy_inputs(_A ) )
_UpperCamelCase = output.images[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_UpperCamelCase = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self : Any ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = torch_device == '''cpu'''
_UpperCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_A , relax_max_difference=_A , )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = 1
_UpperCamelCase = 2
_UpperCamelCase = self.get_dummy_inputs(_A )
for key in inputs.keys():
if key in self.batch_params:
_UpperCamelCase = batch_size * [inputs[key]]
_UpperCamelCase = pipe(**_A , num_images_per_prompt=_A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
_UpperCamelCase = ShapEPipeline.from_pretrained('''openai/shap-e''' )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(0 )
_UpperCamelCase = pipe(
'''a shark''' , generator=_A , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_A , _A )
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = (DDIMParallelScheduler,)
UpperCAmelCase = (("eta", 0.0), ("num_inference_steps", 50))
def UpperCamelCase_ ( self : Dict , **_A : Any ):
_UpperCamelCase = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**_A )
return config
def UpperCamelCase_ ( self : str , **_A : int ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(**_A )
_UpperCamelCase = scheduler_class(**_A )
_UpperCamelCase , _UpperCamelCase = 10, 0.0
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for t in scheduler.timesteps:
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A , _A ).prev_sample
return sample
def UpperCamelCase_ ( self : List[str] ):
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_A )
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(steps_offset=1 )
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def UpperCamelCase_ ( self : Union[str, Any] ):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def UpperCamelCase_ ( self : Optional[int] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_A )
def UpperCamelCase_ ( self : Tuple ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_A )
def UpperCamelCase_ ( self : Tuple ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=_A )
def UpperCamelCase_ ( self : List[Any] ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
self.check_over_configs(thresholding=_A )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=_A , prediction_type=_A , sample_max_value=_A , )
def UpperCamelCase_ ( self : str ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=_A )
def UpperCamelCase_ ( self : int ):
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=_A , num_inference_steps=_A )
def UpperCamelCase_ ( self : Dict ):
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=_A , eta=_A )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_4771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_2460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
_UpperCamelCase , _UpperCamelCase = 10, 0.0
scheduler.set_timesteps(_A )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter
_UpperCamelCase = self.dummy_sample_deter + 0.1
_UpperCamelCase = self.dummy_sample_deter - 0.1
_UpperCamelCase = samplea.shape[0]
_UpperCamelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
_UpperCamelCase = torch.arange(_A )[0:3, None].repeat(1 , _A )
_UpperCamelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_UpperCamelCase = scheduler.batch_step_no_noise(_A , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , _A )
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 1147.7904 ) < 1e-2
assert abs(result_mean.item() - 0.4982 ) < 1e-3
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.full_loop()
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 172.0067 ) < 1e-2
assert abs(result_mean.item() - 0.22_3967 ) < 1e-3
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.full_loop(prediction_type='''v_prediction''' )
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 52.5302 ) < 1e-2
assert abs(result_mean.item() - 0.0684 ) < 1e-3
def UpperCamelCase_ ( self : Any ):
# We specify different beta, so that the first alpha is 0.99
_UpperCamelCase = self.full_loop(set_alpha_to_one=_A , beta_start=0.01 )
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 149.8295 ) < 1e-2
assert abs(result_mean.item() - 0.1951 ) < 1e-3
def UpperCamelCase_ ( self : List[Any] ):
# We specify different beta, so that the first alpha is 0.99
_UpperCamelCase = self.full_loop(set_alpha_to_one=_A , beta_start=0.01 )
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 149.0784 ) < 1e-2
assert abs(result_mean.item() - 0.1941 ) < 1e-3
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_lowerCAmelCase = HfApi()
_lowerCAmelCase = {}
# fmt: off
_lowerCAmelCase = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
_lowerCAmelCase = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
_lowerCAmelCase = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
_lowerCAmelCase = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
_lowerCAmelCase = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
_lowerCAmelCase = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
_lowerCAmelCase = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
_lowerCAmelCase = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
_lowerCAmelCase = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
_lowerCAmelCase = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
_lowerCAmelCase = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
_lowerCAmelCase = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
_lowerCAmelCase = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
_lowerCAmelCase = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
_lowerCAmelCase = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
_lowerCAmelCase = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_lowerCAmelCase = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f'Started running {mod.modelId}!!!')
if mod.modelId.startswith("CompVis"):
_lowerCAmelCase = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
_lowerCAmelCase = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_lowerCAmelCase = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_lowerCAmelCase = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_lowerCAmelCase = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1E-3
)
print(f'{mod.modelId} has passed successfully!!!')
| 10 | 1 |
from __future__ import annotations
def _snake_case ( __snake_case ):
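    # Arithmetic mean of a non-empty list; an empty list raises instead of dividing by zero.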
if not nums:
raise ValueError('''List is empty''' )
return sum(__snake_case ) / len(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | from typing import List
from .keymap import KEYMAP, get_character
def _snake_case ( __snake_case ):
def decorator(__snake_case ):
_UpperCamelCase = getattr(__snake_case , '''handle_key''' , [] )
handle += [key]
setattr(__snake_case , '''handle_key''' , __snake_case )
return func
return decorator
def _snake_case ( *__snake_case ):
def decorator(__snake_case ):
_UpperCamelCase = getattr(__snake_case , '''handle_key''' , [] )
handle += keys
setattr(__snake_case , '''handle_key''' , __snake_case )
return func
return decorator
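# Metaclass that collects every method tagged by the decorators above (via their
# ``handle_key`` attribute) into a ``key_handler`` dict mapping key codes to handler
# functions, and wires ``handle_input`` onto the class as the dispatch entry point.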
class lowerCAmelCase_ ( __lowercase ):
def __new__( cls : Optional[Any] , _A : Optional[Any] , _A : Optional[int] , _A : Union[str, Any] ):
_UpperCamelCase = super().__new__(cls , _A , _A , _A )
if not hasattr(_A , '''key_handler''' ):
setattr(_A , '''key_handler''' , {} )
setattr(_A , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
_UpperCamelCase = getattr(_A , '''handle_key''' , [] )
for key in handled_keys:
_UpperCamelCase = value
return new_cls
@staticmethod
def UpperCamelCase_ ( cls : str ):
_UpperCamelCase = get_character()
if char != KEYMAP["undefined"]:
_UpperCamelCase = ord(_A )
_UpperCamelCase = cls.key_handler.get(_A )
if handler:
_UpperCamelCase = char
return handler(cls )
else:
return None
def _snake_case ( cls ):
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 10 | 1 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
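# Run untrusted, model-generated code in a separate process with a hard timeout; the
# child appends "passed", "timed out", or the raised exception to a shared result list.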
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = multiprocessing.Manager()
_UpperCamelCase = manager.list()
_UpperCamelCase = multiprocessing.Process(target=__snake_case , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def _snake_case ( __snake_case , __snake_case , __snake_case ):
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
_UpperCamelCase = shutil.rmtree
_UpperCamelCase = os.rmdir
_UpperCamelCase = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
_UpperCamelCase = {}
with swallow_io():
with time_limit(__snake_case ):
exec(__snake_case , __snake_case )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(f"""failed: {e}""" )
# Needed for cleaning up.
_UpperCamelCase = rmtree
_UpperCamelCase = rmdir
_UpperCamelCase = chdir
@contextlib.contextmanager
def _snake_case ( __snake_case ):
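    # Context manager that raises TimeoutException via SIGALRM once the given number of
    # seconds elapses; the timer is always cleared on exit so later code is not interrupted.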
def signal_handler(__snake_case , __snake_case ):
raise TimeoutException('''Timed out!''' )
signal.setitimer(signal.ITIMER_REAL , __snake_case )
signal.signal(signal.SIGALRM , __snake_case )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def _snake_case ( ):
_UpperCamelCase = WriteOnlyStringIO()
with contextlib.redirect_stdout(__snake_case ):
with contextlib.redirect_stderr(__snake_case ):
with redirect_stdin(__snake_case ):
yield
@contextlib.contextmanager
def _snake_case ( ):
with tempfile.TemporaryDirectory() as dirname:
with chdir(__snake_case ):
yield dirname
class lowerCAmelCase_ ( __lowercase ):
pass
class lowerCAmelCase_ ( io.StringIO ):
def UpperCamelCase_ ( self : str , *_A : Tuple , **_A : int ):
raise OSError
def UpperCamelCase_ ( self : int , *_A : List[Any] , **_A : Optional[Any] ):
raise OSError
def UpperCamelCase_ ( self : Optional[int] , *_A : Any , **_A : Dict ):
raise OSError
def UpperCamelCase_ ( self : int , *_A : Tuple , **_A : str ):
return False
class lowerCAmelCase_ ( contextlib._RedirectStream ): # type: ignore
UpperCAmelCase = "stdin"
@contextlib.contextmanager
def _snake_case ( __snake_case ):
if root == ".":
yield
return
_UpperCamelCase = os.getcwd()
os.chdir(__snake_case )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(__snake_case )
def _snake_case ( __snake_case=None ):
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
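    # Neutralize destructive APIs before executing untrusted code: in the upstream
    # human-eval implementation the assignments below set attributes such as
    # builtins.exit, os.kill, os.system, shutil.rmtree, subprocess.Popen and several
    # sys.modules entries to None so the sandboxed program cannot reach them.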
import builtins
_UpperCamelCase = None
_UpperCamelCase = None
import os
_UpperCamelCase = '''1'''
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
import shutil
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
import subprocess
_UpperCamelCase = None # type: ignore
_UpperCamelCase = None
import sys
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
| 10 | import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class lowerCAmelCase_ ( unittest.TestCase ):
UpperCAmelCase = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
_UpperCamelCase = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
_A , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A , num_return_sequences=2 , return_tensors=_A )
self.assertEqual(
_A , [
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
] , )
_UpperCamelCase = text_generator.model.config.eos_token_id
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=_A , num_return_sequences=2 , batch_size=2 , return_tensors=_A , )
self.assertEqual(
_A , [
[
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
],
[
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
],
] , )
@require_tf
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
_UpperCamelCase = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=_A )
self.assertEqual(
_A , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def UpperCamelCase_ ( self : int , _A : str , _A : Union[str, Any] , _A : Any ):
_UpperCamelCase = TextGenerationPipeline(model=_A , tokenizer=_A )
return text_generator, ["This is a test", "Another test"]
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = '''Hello I believe in'''
_UpperCamelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
_UpperCamelCase = text_generator(_A )
self.assertEqual(
_A , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
_UpperCamelCase = text_generator(_A , stop_sequence=''' fe''' )
self.assertEqual(_A , [{'''generated_text''': '''Hello I believe in fe'''}] )
def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : Union[str, Any] ):
_UpperCamelCase = text_generator.model
_UpperCamelCase = text_generator.tokenizer
_UpperCamelCase = text_generator('''This is a test''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_UpperCamelCase = text_generator('''This is a test''' , return_full_text=_A )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_UpperCamelCase = pipeline(task='''text-generation''' , model=_A , tokenizer=_A , return_full_text=_A )
_UpperCamelCase = text_generator('''This is a test''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_UpperCamelCase = text_generator('''This is a test''' , return_full_text=_A )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_UpperCamelCase = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=_A )
self.assertEqual(
_A , [
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
] , )
if text_generator.tokenizer.pad_token is not None:
_UpperCamelCase = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=_A )
self.assertEqual(
_A , [
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
] , )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_full_text=_A , return_text=_A )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_full_text=_A , return_tensors=_A )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_text=_A , return_tensors=_A )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
_UpperCamelCase = text_generator('''''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
_UpperCamelCase = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
_UpperCamelCase = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
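            # handle_long_generation="hole" truncates the prompt from the left so that
            # the requested max_new_tokens still fit within the model's maximum length.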
_UpperCamelCase = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
            # The "hole" strategy cannot work once max_new_tokens exceeds the model's maximum length
with self.assertRaises(_A ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase_ ( self : Optional[int] ):
import torch
# Classic `model_kwargs`
_UpperCamelCase = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def UpperCamelCase_ ( self : Union[str, Any] ):
import torch
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase_ ( self : Optional[int] ):
import torch
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=_A , top_p=0.5 )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = '''Hello world'''
_UpperCamelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
_UpperCamelCase = logging.get_logger('''transformers.generation.tf_utils''' )
else:
_UpperCamelCase = logging.get_logger('''transformers.generation.utils''' )
        _UpperCamelCase = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_length=10 , max_new_tokens=1 )
self.assertIn(_A , cl.out )
# The user only sets one -> no warning
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_new_tokens=1 )
self.assertNotIn(_A , cl.out )
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_length=10 )
self.assertNotIn(_A , cl.out )
| 10 | 1 |
from string import ascii_uppercase
_lowerCAmelCase = {str(ord(c) - 55): c for c in ascii_uppercase}
def _snake_case ( __snake_case , __snake_case ):
if isinstance(__snake_case , __snake_case ):
raise TypeError('''int() can\'t convert non-string with explicit base''' )
if num < 0:
raise ValueError('''parameter must be positive int''' )
if isinstance(__snake_case , __snake_case ):
raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
if isinstance(__snake_case , __snake_case ):
raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
if base in (0, 1):
raise ValueError('''base must be >= 2''' )
if base > 36:
raise ValueError('''base must be <= 36''' )
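    # Repeated divmod extracts digits least-significant first; digits 10..35 map to
    # letters through ALPHABET_VALUES, and the accumulated string is reversed at the end.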
_UpperCamelCase = ''''''
_UpperCamelCase = 0
_UpperCamelCase = 0
while div != 1:
_UpperCamelCase , _UpperCamelCase = divmod(__snake_case , __snake_case )
if base >= 11 and 9 < mod < 36:
_UpperCamelCase = ALPHABET_VALUES[str(__snake_case )]
else:
_UpperCamelCase = str(__snake_case )
new_value += actual_value
_UpperCamelCase = num // base
_UpperCamelCase = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
new_value += str(__snake_case )
return str(new_value[::-1] )
return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 10 | def _snake_case ( __snake_case = 100 ):
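    # Project Euler problem 6: difference between the square of the sum, (n(n+1)/2)^2,
    # and the sum of the squares, n(n+1)(2n+1)/6, of the first n natural numbers.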
_UpperCamelCase = (n * (n + 1) // 2) ** 2
_UpperCamelCase = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(f'{solution() = }')
| 10 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCAmelCase = logging.get_logger(__name__)
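# Image processor implementing the standard classification preprocessing pipeline:
# shortest-edge resize -> center crop -> rescale -> normalize -> channels-first output.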
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = ["pixel_values"]
def __init__( self : Any , _A : bool = True , _A : Optional[Dict[str, int]] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Union[str, Any] , ):
super().__init__(**_A )
_UpperCamelCase = size if size is not None else {'''shortest_edge''': 256}
_UpperCamelCase = get_size_dict(_A , default_to_square=_A )
_UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_UpperCamelCase = get_size_dict(_A )
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = resample
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase_ ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[Any] , ):
_UpperCamelCase = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_UpperCamelCase = get_resize_output_image_size(_A , size=size['''shortest_edge'''] , default_to_square=_A )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : Union[str, Any] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ):
_UpperCamelCase = get_size_dict(_A )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def UpperCamelCase_ ( self : int , _A : np.ndarray , _A : float , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[Any] ):
return rescale(_A , scale=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : Any , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Dict , ):
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : Optional[int] , _A : ImageInput , _A : Optional[bool] = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : Optional[bool] = None , _A : Optional[float] = None , _A : Optional[bool] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_A : Dict , ):
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(_A , default_to_square=_A )
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase = get_size_dict(_A )
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(_A ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_center_crop:
_UpperCamelCase = [self.center_crop(image=_A , size=_A ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(_A , _A ) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
| 10 | import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_lowerCAmelCase = logging.get_logger(__name__)
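# Compute an output size that optionally preserves the aspect ratio while constraining
# both dimensions to a multiple of ``multiple`` (DPT-style models require input sizes
# divisible by ``ensure_multiple_of``).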
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ):
def constraint_to_multiple_of(__snake_case , __snake_case , __snake_case=0 , __snake_case=None ):
_UpperCamelCase = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_UpperCamelCase = math.floor(val / multiple ) * multiple
if x < min_val:
_UpperCamelCase = math.ceil(val / multiple ) * multiple
return x
_UpperCamelCase = (output_size, output_size) if isinstance(__snake_case , __snake_case ) else output_size
_UpperCamelCase , _UpperCamelCase = get_image_size(__snake_case )
_UpperCamelCase , _UpperCamelCase = output_size
# determine new height and width
_UpperCamelCase = output_height / input_height
_UpperCamelCase = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_UpperCamelCase = scale_width
else:
# fit height
_UpperCamelCase = scale_height
_UpperCamelCase = constraint_to_multiple_of(scale_height * input_height , multiple=__snake_case )
_UpperCamelCase = constraint_to_multiple_of(scale_width * input_width , multiple=__snake_case )
return (new_height, new_width)
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = ["pixel_values"]
def __init__( self : List[Any] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = False , _A : int = 1 , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : List[str] , ):
super().__init__(**_A )
_UpperCamelCase = size if size is not None else {'''height''': 384, '''width''': 384}
_UpperCamelCase = get_size_dict(_A )
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = keep_aspect_ratio
_UpperCamelCase = ensure_multiple_of
_UpperCamelCase = resample
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase_ ( self : List[str] , _A : np.ndarray , _A : Dict[str, int] , _A : bool = False , _A : int = 1 , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
_UpperCamelCase = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_UpperCamelCase = get_resize_output_image_size(
_A , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_A , multiple=_A , )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : str , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
return rescale(_A , scale=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : int , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : Optional[int] , _A : ImageInput , _A : bool = None , _A : int = None , _A : bool = None , _A : int = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ):
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(_A )
_UpperCamelCase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_UpperCamelCase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(_A ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(_A , _A ) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
def UpperCamelCase_ ( self : Any , _A : Any , _A : List[Tuple] = None ):
_UpperCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_A ) != len(_A ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_A ):
_UpperCamelCase = target_sizes.numpy()
_UpperCamelCase = []
for idx in range(len(_A ) ):
_UpperCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_A )
_UpperCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_A )
else:
_UpperCamelCase = logits.argmax(dim=1 )
_UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 10 | 1 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_lowerCAmelCase = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
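# Maps each GLUE task name to its number of labels; used below to pick the
# sequence-classification head when converting a fine-tuned checkpoint.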
logging.set_verbosity_info()
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case=None ):
# Initialise PyTorch model
_UpperCamelCase = XLNetConfig.from_json_file(__snake_case )
_UpperCamelCase = finetuning_task.lower() if finetuning_task is not None else ''''''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
_UpperCamelCase = finetuning_task
_UpperCamelCase = GLUE_TASKS_NUM_LABELS[finetuning_task]
_UpperCamelCase = XLNetForSequenceClassification(__snake_case )
elif "squad" in finetuning_task:
_UpperCamelCase = finetuning_task
_UpperCamelCase = XLNetForQuestionAnswering(__snake_case )
else:
_UpperCamelCase = XLNetLMHeadModel(__snake_case )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(__snake_case , __snake_case , __snake_case )
# Save pytorch-model
_UpperCamelCase = os.path.join(__snake_case , __snake_case )
_UpperCamelCase = os.path.join(__snake_case , __snake_case )
print(f"""Save PyTorch model to {os.path.abspath(__snake_case )}""" )
torch.save(model.state_dict() , __snake_case )
print(f"""Save configuration file to {os.path.abspath(__snake_case )}""" )
with open(__snake_case , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
_lowerCAmelCase = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 10 | import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCAmelCase = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
_UpperCamelCase = self.diffusers_dir
shutil.copy(
os.path.join(_A , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def UpperCamelCase_ ( self : str , _A : List[str] , _A : Optional[Any] , _A : List[str] , _A : Optional[int]=None ):
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_UpperCamelCase = black.format_str(_A , mode=_A )
_UpperCamelCase = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(_A , '''w''' , newline='''\n''' ) as f:
f.write(_A )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_A ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_A )
with open(_A , '''r''' ) as f:
self.assertTrue(f.read() , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , _A , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , _A ) , )
# Copy consistency with a really long name
_UpperCamelCase = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , _A , _A ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , _A , overwrite_result=re.sub('''DDPM''' , '''Test''' , _A ) , )
| 10 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self : List[str] , _A : Any , _A : List[Any]=7 , _A : int=3 , _A : Tuple=30 , _A : Union[str, Any]=400 , _A : List[str]=True , _A : Union[str, Any]=None , _A : int=True , _A : Optional[int]=[0.5, 0.5, 0.5] , _A : Optional[Any]=[0.5, 0.5, 0.5] , _A : List[str]=True , _A : Union[str, Any]=1 / 255 , _A : Union[str, Any]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_UpperCamelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = num_channels
_UpperCamelCase = min_resolution
_UpperCamelCase = max_resolution
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean
_UpperCamelCase = image_std
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_pad
def UpperCamelCase_ ( self : Union[str, Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase_ ( self : int , _A : Tuple , _A : Union[str, Any]=False ):
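        # Mirror the processor's shortest-edge resizing to predict the expected output
        # height/width, taking the per-batch maximum when ``batched`` is True.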
if not batched:
_UpperCamelCase = image_inputs[0]
if isinstance(_A , Image.Image ):
_UpperCamelCase , _UpperCamelCase = image.size
else:
_UpperCamelCase , _UpperCamelCase = image.shape[1], image.shape[2]
if w < h:
_UpperCamelCase = int(self.size['''shortest_edge'''] * h / w )
_UpperCamelCase = self.size['''shortest_edge''']
elif w > h:
_UpperCamelCase = self.size['''shortest_edge''']
_UpperCamelCase = int(self.size['''shortest_edge'''] * w / h )
else:
_UpperCamelCase = self.size['''shortest_edge''']
_UpperCamelCase = self.size['''shortest_edge''']
else:
_UpperCamelCase = []
for image in image_inputs:
_UpperCamelCase , _UpperCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_UpperCamelCase = max(_A , key=lambda _A : item[0] )[0]
_UpperCamelCase = max(_A , key=lambda _A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = DeformableDetrImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = DeformableDetrImageProcessingTester(self )
@property
def UpperCamelCase_ ( self : Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''do_rescale''' ) )
self.assertTrue(hasattr(_A , '''do_pad''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , _A )
_UpperCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_A )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
pass
def UpperCamelCase_ ( self : int ):
# Initialize image_processing
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(_A , batched=_A )
_UpperCamelCase = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Optional[int] ):
# Initialize image_processing
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCamelCase = image_processing(_A , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Dict ):
# Initialize image_processing
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCamelCase = image_processing(_A , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
# prepare image and target
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_UpperCamelCase = json.loads(f.read() )
_UpperCamelCase = {'''image_id''': 3_9769, '''annotations''': target}
# encode them
_UpperCamelCase = DeformableDetrImageProcessor()
_UpperCamelCase = image_processing(images=_A , annotations=_A , return_tensors='''pt''' )
# verify pixel values
_UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , _A )
_UpperCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
_UpperCamelCase = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
# verify boxes
_UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
_UpperCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) )
# verify image_id
_UpperCamelCase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
# verify is_crowd
_UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
# verify class_labels
_UpperCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
# verify orig_size
_UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
# verify size
_UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
@slow
def UpperCamelCase_ ( self : List[str] ):
# prepare image, target and masks_path
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_UpperCamelCase = json.loads(f.read() )
_UpperCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
_UpperCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_UpperCamelCase = DeformableDetrImageProcessor(format='''coco_panoptic''' )
_UpperCamelCase = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='''pt''' )
# verify pixel values
_UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , _A )
_UpperCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
_UpperCamelCase = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
# verify boxes
_UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
_UpperCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) )
# verify image_id
_UpperCamelCase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
# verify is_crowd
_UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
# verify class_labels
_UpperCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
# verify masks
_UpperCamelCase = 82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _A )
# verify orig_size
_UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
# verify size
_UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
| 10 | import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_lowerCAmelCase = True
from torch.cuda.amp import autocast
_lowerCAmelCase = logging.getLogger(__name__)
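# Helper so dataclass fields can default to a mutable list via ``default_factory``.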
def _snake_case ( __snake_case=None , __snake_case=None ):
return field(default_factory=lambda: default , metadata=__snake_case )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."} )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
UpperCAmelCase = field(
default=0.1, metadata={
"help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
}, )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."}, )
UpperCAmelCase = field(
default=0.0_5, metadata={
"help": (
"Propability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
)
}, )
UpperCAmelCase = field(default=0.0, metadata={"help": "The LayerDrop probability."} )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
UpperCAmelCase = field(
default="train+validation", metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "The number of processes to use for the preprocessing."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
}, )
UpperCAmelCase = list_field(
default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"], metadata={"help": "A list of characters to remove from the transcripts."}, )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = 42
UpperCAmelCase = True
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
def __call__( self : Union[str, Any] , _A : List[Dict[str, Union[List[int], torch.Tensor]]] ):
        # split inputs and labels since they have to be of different lengths and need
# different padding methods
_UpperCamelCase = [{'''input_values''': feature['''input_values''']} for feature in features]
_UpperCamelCase = [{'''input_ids''': feature['''labels''']} for feature in features]
_UpperCamelCase = self.processor.pad(
_A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
_UpperCamelCase = self.processor.pad(
labels=_A , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , )
# replace padding with -100 to ignore loss correctly
_UpperCamelCase = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
_UpperCamelCase = labels
return batch
class lowerCAmelCase_ ( __lowercase ):
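    # Trainer subclass whose ``training_step`` handles AMP / apex / DeepSpeed backward
    # passes and rescales the CTC loss for multi-GPU training and gradient accumulation.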
def UpperCamelCase_ ( self : Dict , _A : nn.Module , _A : Dict[str, Union[torch.Tensor, Any]] ):
model.train()
_UpperCamelCase = self._prepare_inputs(_A )
if self.use_amp:
with autocast():
_UpperCamelCase = self.compute_loss(_A , _A )
else:
_UpperCamelCase = self.compute_loss(_A , _A )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
_UpperCamelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_UpperCamelCase = loss.sum() / (inputs['''labels'''] >= 0).sum()
else:
raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
_UpperCamelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(_A ).backward()
elif self.use_apex:
with amp.scale_loss(_A , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(_A )
else:
loss.backward()
return loss.detach()
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
_UpperCamelCase = datasets.load_dataset(
'''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
_UpperCamelCase = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
# Create and save tokenizer
_UpperCamelCase = f"""[{"".join(data_args.chars_to_ignore )}]"""
def remove_special_characters(__snake_case ):
_UpperCamelCase = re.sub(__snake_case , '''''' , batch['''sentence'''] ).lower() + ''' '''
return batch
_UpperCamelCase = train_dataset.map(__snake_case , remove_columns=['''sentence'''] )
_UpperCamelCase = eval_dataset.map(__snake_case , remove_columns=['''sentence'''] )
def extract_all_chars(__snake_case ):
_UpperCamelCase = ''' '''.join(batch['''text'''] )
_UpperCamelCase = list(set(__snake_case ) )
return {"vocab": [vocab], "all_text": [all_text]}
_UpperCamelCase = train_dataset.map(
__snake_case , batched=__snake_case , batch_size=-1 , keep_in_memory=__snake_case , remove_columns=train_dataset.column_names , )
    _UpperCamelCase = eval_dataset.map(
__snake_case , batched=__snake_case , batch_size=-1 , keep_in_memory=__snake_case , remove_columns=eval_dataset.column_names , )
_UpperCamelCase = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
_UpperCamelCase = {v: k for k, v in enumerate(__snake_case )}
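    # move the space character's id onto the word-delimiter token "|" expected by the CTC tokenizer created below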
_UpperCamelCase = vocab_dict[''' ''']
del vocab_dict[" "]
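    # the two length-based ids below append [UNK] and [PAD] at the end of the vocabulary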
_UpperCamelCase = len(__snake_case )
_UpperCamelCase = len(__snake_case )
with open('''vocab.json''' , '''w''' ) as vocab_file:
json.dump(__snake_case , __snake_case )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = WavaVecaCTCTokenizer(
'''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
_UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0.0 , do_normalize=__snake_case , return_attention_mask=__snake_case )
_UpperCamelCase = WavaVecaProcessor(feature_extractor=__snake_case , tokenizer=__snake_case )
_UpperCamelCase = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
_UpperCamelCase = min(len(__snake_case ) , data_args.max_train_samples )
_UpperCamelCase = train_dataset.select(range(__snake_case ) )
if data_args.max_val_samples is not None:
_UpperCamelCase = eval_dataset.select(range(data_args.max_val_samples ) )
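    # Common Voice audio ships at 48 kHz; Wav2Vec2 expects 16 kHz input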
_UpperCamelCase = torchaudio.transforms.Resample(48000 , 16000 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(__snake_case ):
_UpperCamelCase , _UpperCamelCase = torchaudio.load(batch['''path'''] )
_UpperCamelCase = resampler(__snake_case ).squeeze().numpy()
_UpperCamelCase = 16000
_UpperCamelCase = batch['''text''']
return batch
_UpperCamelCase = train_dataset.map(
__snake_case , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
__snake_case , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(__snake_case ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
_UpperCamelCase = processor(
audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
batch.update(__snake_case )
return batch
_UpperCamelCase = train_dataset.map(
__snake_case , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
__snake_case , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , )
# Metric
_UpperCamelCase = datasets.load_metric('''wer''' )
def compute_metrics(__snake_case ):
_UpperCamelCase = pred.predictions
_UpperCamelCase = np.argmax(__snake_case , axis=-1 )
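        # in the original script this restores the pad token id where labels were set to -100 for the loss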
_UpperCamelCase = processor.tokenizer.pad_token_id
_UpperCamelCase = processor.batch_decode(__snake_case )
# we do not want to group tokens when computing the metrics
_UpperCamelCase = processor.batch_decode(pred.label_ids , group_tokens=__snake_case )
_UpperCamelCase = wer_metric.compute(predictions=__snake_case , references=__snake_case )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
_UpperCamelCase = DataCollatorCTCWithPadding(processor=__snake_case , padding=__snake_case )
# Initialize our Trainer
_UpperCamelCase = CTCTrainer(
model=__snake_case , data_collator=__snake_case , args=__snake_case , compute_metrics=__snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCamelCase = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
_UpperCamelCase = model_args.model_name_or_path
else:
_UpperCamelCase = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
_UpperCamelCase = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model()
_UpperCamelCase = train_result.metrics
_UpperCamelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__snake_case )
)
_UpperCamelCase = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''train''' , __snake_case )
trainer.save_metrics('''train''' , __snake_case )
trainer.save_state()
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(__snake_case )
_UpperCamelCase = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''eval''' , __snake_case )
trainer.save_metrics('''eval''' , __snake_case )
return results
if __name__ == "__main__":
main()
| 10 | 1 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger()
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = True ):
print(f"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
_UpperCamelCase = timm.create_model('''levit_128s''' , pretrained=__snake_case )
else:
_UpperCamelCase = timm.create_model('''levit_128''' , pretrained=__snake_case )
if hidden_sizes == 192:
_UpperCamelCase = timm.create_model('''levit_192''' , pretrained=__snake_case )
if hidden_sizes == 256:
_UpperCamelCase = timm.create_model('''levit_256''' , pretrained=__snake_case )
if hidden_sizes == 384:
_UpperCamelCase = timm.create_model('''levit_384''' , pretrained=__snake_case )
from_model.eval()
_UpperCamelCase = LevitForImageClassificationWithTeacher(__snake_case ).eval()
_UpperCamelCase = OrderedDict()
_UpperCamelCase = from_model.state_dict()
_UpperCamelCase = list(from_model.state_dict().keys() )
_UpperCamelCase = list(our_model.state_dict().keys() )
print(len(__snake_case ) , len(__snake_case ) )
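        # copy weights positionally: this assumes both state dicts enumerate parameters in the same order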
for i in range(len(__snake_case ) ):
_UpperCamelCase = weights[og_keys[i]]
our_model.load_state_dict(__snake_case )
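        # sanity check: run the same random batch through both models and compare logits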
_UpperCamelCase = torch.randn((2, 3, 224, 224) )
_UpperCamelCase = from_model(__snake_case )
_UpperCamelCase = our_model(__snake_case ).logits
assert torch.allclose(__snake_case , __snake_case ), "The model logits don't match the original one."
_UpperCamelCase = name
print(__snake_case )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
_UpperCamelCase = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f"""Pushed {checkpoint_name}""" )
def _snake_case ( __snake_case , __snake_case = None , __snake_case = True ):
_UpperCamelCase = '''imagenet-1k-id2label.json'''
_UpperCamelCase = 1000
_UpperCamelCase = (1, num_labels)
_UpperCamelCase = '''huggingface/label-files'''
_UpperCamelCase = num_labels
_UpperCamelCase = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='''dataset''' ) , '''r''' ) )
_UpperCamelCase = {int(__snake_case ): v for k, v in idalabel.items()}
_UpperCamelCase = idalabel
_UpperCamelCase = {v: k for k, v in idalabel.items()}
_UpperCamelCase = partial(__snake_case , num_labels=__snake_case , idalabel=__snake_case , labelaid=__snake_case )
_UpperCamelCase = {
'''levit-128S''': 128,
'''levit-128''': 128,
'''levit-192''': 192,
'''levit-256''': 256,
'''levit-384''': 384,
}
_UpperCamelCase = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , __snake_case , names_to_config[model_name] , __snake_case , __snake_case )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , __snake_case , __snake_case , __snake_case , __snake_case )
return config, expected_shape
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="levit-dump-folder/",
type=Path,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
_lowerCAmelCase = parser.parse_args()
_lowerCAmelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 10 | import math
class lowerCAmelCase_ :
    def __init__( self : Tuple , _A : int=0 ): # a graph with nodes 0, 1, ..., N-1
_UpperCamelCase = n
_UpperCamelCase = [
[math.inf for j in range(0 , _A )] for i in range(0 , _A )
] # adjacency matrix for weight
_UpperCamelCase = [
[math.inf for j in range(0 , _A )] for i in range(0 , _A )
] # dp[i][j] stores minimum distance from i to j
def UpperCamelCase_ ( self : Dict , _A : str , _A : List[str] , _A : Optional[Any] ):
_UpperCamelCase = w
def UpperCamelCase_ ( self : Optional[int] ):
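        # classic Floyd-Warshall relaxation: try every vertex k as an intermediate hop for every pair (i, j)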
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
_UpperCamelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCamelCase_ ( self : List[str] , _A : Optional[int] , _A : Optional[int] ):
return self.dp[u][v]
if __name__ == "__main__":
_lowerCAmelCase = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 10 | 1 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : Union[str, Any] , _A : Tuple , _A : Dict=768 ):
super().__init__(_A )
_UpperCamelCase = proj_size
_UpperCamelCase = CLIPVisionModel(_A )
_UpperCamelCase = PaintByExampleMapper(_A )
_UpperCamelCase = nn.LayerNorm(config.hidden_size )
_UpperCamelCase = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
_UpperCamelCase = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def UpperCamelCase_ ( self : Tuple , _A : List[str] , _A : str=False ):
_UpperCamelCase = self.model(pixel_values=_A )
_UpperCamelCase = clip_output.pooler_output
_UpperCamelCase = self.mapper(latent_states[:, None] )
_UpperCamelCase = self.final_layer_norm(_A )
_UpperCamelCase = self.proj_out(_A )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class lowerCAmelCase_ ( nn.Module ):
def __init__( self : Optional[Any] , _A : Optional[int] ):
super().__init__()
_UpperCamelCase = (config.num_hidden_layers + 1) // 5
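        # roughly one mapper block per five CLIP encoder layers, per the expression above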
_UpperCamelCase = config.hidden_size
_UpperCamelCase = 1
_UpperCamelCase = nn.ModuleList(
[
BasicTransformerBlock(_A , _A , _A , activation_fn='''gelu''' , attention_bias=_A )
for _ in range(_A )
] )
def UpperCamelCase_ ( self : Optional[Any] , _A : int ):
for block in self.blocks:
_UpperCamelCase = block(_A )
return hidden_states
| 10 | import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
def _snake_case ( __snake_case=None , __snake_case=None ):
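    # dataclass fields cannot take mutable defaults directly, so wrap the value in a default_factory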
return field(default_factory=lambda: default , metadata=__snake_case )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = list_field(
default=[], metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
}, )
UpperCAmelCase = list_field(
default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
UpperCAmelCase = list_field(
default=[8, 32, 128, 512], metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Use FP16 to accelerate inference."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Benchmark training of model"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Verbose memory tracing"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
}, )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Trace memory line by line"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Save result to a CSV file"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Save all print statements in a log file"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to print environment information"} )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
}, )
UpperCAmelCase = field(
default=F"""inference_time_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving time results to csv."}, )
UpperCAmelCase = field(
default=F"""inference_memory_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving memory results to csv."}, )
UpperCAmelCase = field(
default=F"""train_time_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving time results to csv for training."}, )
UpperCAmelCase = field(
default=F"""train_memory_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving memory results to csv for training."}, )
UpperCAmelCase = field(
default=F"""env_info_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving environment information."}, )
UpperCAmelCase = field(
default=F"""log_{round(time() )}.csv""", metadata={"help": "Log filename used if print statements are saved in log."}, )
UpperCAmelCase = field(default=3, metadata={"help": "Times an experiment will be run."} )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
}, )
def UpperCamelCase_ ( self : Union[str, Any] ):
warnings.warn(
F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
''' to benchmark Transformer models.''' , _A , )
def UpperCamelCase_ ( self : str ):
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def UpperCamelCase_ ( self : List[Any] ):
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def UpperCamelCase_ ( self : Optional[int] ):
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
| 10 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _snake_case ( *__snake_case , __snake_case = None , __snake_case=True , __snake_case=2 ):
from .. import __version__
_UpperCamelCase = take_from
_UpperCamelCase = ()
if not isinstance(args[0] , __snake_case ):
_UpperCamelCase = (args,)
for attribute, version_name, message in args:
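        # a deprecation whose removal version has already been reached is treated as a hard error, not a warning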
if version.parse(version.parse(__snake_case ).base_version ) >= version.parse(__snake_case ):
raise ValueError(
f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
f""" version {__version__} is >= {version_name}""" )
_UpperCamelCase = None
if isinstance(__snake_case , __snake_case ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(__snake_case ),)
_UpperCamelCase = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
elif hasattr(__snake_case , __snake_case ):
values += (getattr(__snake_case , __snake_case ),)
_UpperCamelCase = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
elif deprecated_kwargs is None:
_UpperCamelCase = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
if warning is not None:
_UpperCamelCase = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , __snake_case , stacklevel=__snake_case )
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) > 0:
_UpperCamelCase = inspect.getouterframes(inspect.currentframe() )[1]
_UpperCamelCase = call_frame.filename
_UpperCamelCase = call_frame.lineno
_UpperCamelCase = call_frame.function
_UpperCamelCase , _UpperCamelCase = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
if len(__snake_case ) == 0:
return
elif len(__snake_case ) == 1:
return values[0]
return values
| 10 | 1 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCAmelCase_ ( unittest.TestCase ):
UpperCAmelCase = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def UpperCamelCase_ ( self : Tuple , _A : str , _A : List[Any] , _A : List[Any] ):
_UpperCamelCase = hf_hub_download(
repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
_UpperCamelCase = VideoClassificationPipeline(model=_A , image_processor=_A , top_k=2 )
_UpperCamelCase = [
example_video_filepath,
'''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
]
return video_classifier, examples
def UpperCamelCase_ ( self : Tuple , _A : List[Any] , _A : Union[str, Any] ):
for example in examples:
_UpperCamelCase = video_classifier(_A )
self.assertEqual(
_A , [
{'''score''': ANY(_A ), '''label''': ANY(_A )},
{'''score''': ANY(_A ), '''label''': ANY(_A )},
] , )
@require_torch
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
_UpperCamelCase = VideoMAEFeatureExtractor(
size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} )
_UpperCamelCase = pipeline(
'''video-classification''' , model=_A , feature_extractor=_A , frame_sampling_rate=4 )
_UpperCamelCase = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
_UpperCamelCase = video_classifier(_A , top_k=2 )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}] , )
_UpperCamelCase = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
] , )
@require_tf
def UpperCamelCase_ ( self : Optional[int] ):
pass
| 10 | import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowerCAmelCase = logging.getLogger(__name__)
def _snake_case ( __snake_case , __snake_case ):
return (preds == labels).mean()
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128, metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed
set_seed(training_args.seed )
try:
_UpperCamelCase = processors[data_args.task_name]()
_UpperCamelCase = processor.get_labels()
_UpperCamelCase = len(__snake_case )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__snake_case , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
_UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCamelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , )
# Get datasets
_UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__snake_case , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__snake_case , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(__snake_case ) -> Dict:
_UpperCamelCase = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(__snake_case , p.label_ids )}
# Data collator
_UpperCamelCase = DataCollatorWithPadding(__snake_case , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_UpperCamelCase = Trainer(
model=__snake_case , args=__snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , compute_metrics=__snake_case , data_collator=__snake_case , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_master():
with open(__snake_case , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , __snake_case , __snake_case )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__snake_case )
return results
def _snake_case ( __snake_case ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 10 | 1 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Any , _A : str ):
_UpperCamelCase = 3
_UpperCamelCase = 250
_UpperCamelCase = ids_tensor((batch_size, length) , _A )
_UpperCamelCase = torch.ones((batch_size, length) , device=_A , dtype=torch.float ) / length
return input_ids, scores
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase , _UpperCamelCase = self._get_tensors(5 )
_UpperCamelCase = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(_A , _A ) )
_UpperCamelCase , _UpperCamelCase = self._get_tensors(9 )
self.assertFalse(criteria(_A , _A ) )
_UpperCamelCase , _UpperCamelCase = self._get_tensors(10 )
self.assertTrue(criteria(_A , _A ) )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = MaxLengthCriteria(max_length=10 )
_UpperCamelCase , _UpperCamelCase = self._get_tensors(5 )
self.assertFalse(criteria(_A , _A ) )
_UpperCamelCase , _UpperCamelCase = self._get_tensors(9 )
self.assertFalse(criteria(_A , _A ) )
_UpperCamelCase , _UpperCamelCase = self._get_tensors(10 )
self.assertTrue(criteria(_A , _A ) )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
_UpperCamelCase , _UpperCamelCase = self._get_tensors(5 )
self.assertFalse(criteria(_A , _A ) )
_UpperCamelCase , _UpperCamelCase = self._get_tensors(9 )
self.assertFalse(criteria(_A , _A ) )
_UpperCamelCase , _UpperCamelCase = self._get_tensors(10 )
self.assertTrue(criteria(_A , _A ) )
_UpperCamelCase = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase , _UpperCamelCase = self._get_tensors(5 )
_UpperCamelCase = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(_A , _A ) )
_UpperCamelCase = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(_A , _A ) )
def UpperCamelCase_ ( self : Any ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(_A ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
_UpperCamelCase = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(_A ) , 1 )
| 10 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "trocr"
UpperCAmelCase = ["past_key_values"]
UpperCAmelCase = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
def __init__( self : List[str] , _A : Optional[Any]=5_0265 , _A : Optional[Any]=1024 , _A : Optional[Any]=12 , _A : Any=16 , _A : Any=4096 , _A : Optional[Any]="gelu" , _A : Union[str, Any]=512 , _A : Dict=0.1 , _A : List[str]=0.0 , _A : Optional[Any]=0.0 , _A : Union[str, Any]=2 , _A : Any=0.02 , _A : List[str]=0.0 , _A : List[str]=True , _A : str=False , _A : List[str]=True , _A : Optional[Any]=True , _A : Optional[int]=1 , _A : int=0 , _A : Any=2 , **_A : Optional[int] , ):
_UpperCamelCase = vocab_size
_UpperCamelCase = d_model
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = activation_function
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = init_std
_UpperCamelCase = decoder_layerdrop
_UpperCamelCase = use_cache
_UpperCamelCase = scale_embedding
_UpperCamelCase = use_learned_position_embeddings
_UpperCamelCase = layernorm_embedding
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , decoder_start_token_id=_A , **_A , )
| 10 | 1 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = 42
UpperCAmelCase = None
UpperCAmelCase = None
def _snake_case ( __snake_case ):
# Validation
def is_valid_tree(__snake_case ) -> bool:
if node is None:
return True
if not isinstance(__snake_case , __snake_case ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(__snake_case ):
raise ValueError(
            '''Each node should be of type TreeNode and its data should be a float.''' )
def is_binary_search_tree_recursive_check(
__snake_case , __snake_case , __snake_case ) -> bool:
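        # every node's value must fall strictly between the bounds inherited from its ancestors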
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , __snake_case , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , __snake_case )
)
return is_binary_search_tree_recursive_check(__snake_case , -float('''inf''' ) , float('''inf''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : Union[str, Any] , _A : Optional[Any] , _A : Any=13 , _A : Union[str, Any]=7 , _A : List[str]=True , _A : List[str]=True , _A : List[str]=True , _A : List[str]=True , _A : List[Any]=True , _A : Optional[int]=False , _A : Any=False , _A : int=False , _A : Optional[Any]=2 , _A : Any=99 , _A : str=0 , _A : Union[str, Any]=32 , _A : List[Any]=5 , _A : Tuple=4 , _A : List[str]=0.1 , _A : Union[str, Any]=0.1 , _A : int=512 , _A : Union[str, Any]=12 , _A : List[str]=2 , _A : int=0.02 , _A : Optional[Any]=3 , _A : Any=4 , _A : Optional[int]="last" , _A : Any=None , _A : Dict=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_lengths
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = gelu_activation
_UpperCamelCase = sinusoidal_embeddings
_UpperCamelCase = causal
_UpperCamelCase = asm
_UpperCamelCase = n_langs
_UpperCamelCase = vocab_size
_UpperCamelCase = n_special
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = summary_type
_UpperCamelCase = use_proj
_UpperCamelCase = scope
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_input_lengths:
_UpperCamelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , 2 ).float()
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase_ ( self : str ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCamelCase_ ( self : str , _A : Union[str, Any] , _A : Optional[Any] , _A : str , _A : Tuple , _A : List[str] , _A : List[Any] , _A : Any , _A : str , _A : Optional[int] , ):
_UpperCamelCase = FlaubertModel(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , lengths=_A , langs=_A )
_UpperCamelCase = model(_A , langs=_A )
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple , _A : List[Any] , _A : str , _A : Optional[int] , _A : Optional[Any] , _A : List[str] , _A : int , _A : str , _A : List[Any] , _A : Any , ):
_UpperCamelCase = FlaubertWithLMHeadModel(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Tuple , _A : List[str] , _A : List[str] , _A : Optional[Any] , _A : Union[str, Any] , _A : str , _A : List[str] , _A : Tuple , _A : Optional[int] , _A : Dict , ):
_UpperCamelCase = FlaubertForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A , start_positions=_A , end_positions=_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : Tuple , _A : str , _A : Tuple , _A : Tuple , _A : Union[str, Any] , _A : List[str] , _A : int , _A : str , _A : Dict , _A : List[Any] , ):
_UpperCamelCase = FlaubertForQuestionAnswering(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
_UpperCamelCase = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
((_UpperCamelCase) , ) = result_with_labels.to_tuple()
_UpperCamelCase = model(_A , start_positions=_A , end_positions=_A )
((_UpperCamelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCamelCase_ ( self : List[Any] , _A : Union[str, Any] , _A : Tuple , _A : str , _A : int , _A : int , _A : Optional[int] , _A : Optional[int] , _A : int , _A : List[str] , ):
_UpperCamelCase = FlaubertForSequenceClassification(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self : Optional[int] , _A : List[str] , _A : Optional[Any] , _A : str , _A : Union[str, Any] , _A : List[Any] , _A : int , _A : List[Any] , _A : str , _A : List[str] , ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = FlaubertForTokenClassification(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Tuple , _A : Dict , _A : str , _A : Optional[Any] , _A : List[str] , _A : Any , _A : Optional[int] , _A : Optional[Any] , _A : List[Any] , _A : List[str] , ):
_UpperCamelCase = self.num_choices
_UpperCamelCase = FlaubertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = config_and_inputs
_UpperCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase_ ( self : Union[str, Any] , _A : Dict , _A : Dict , _A : Tuple , _A : int , _A : Any ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCamelCase_ ( self : str , _A : Any , _A : List[str] , _A : Optional[int]=False ):
_UpperCamelCase = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = FlaubertModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , emb_dim=37 )
def UpperCamelCase_ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_A )
@slow
def UpperCamelCase_ ( self : str ):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = FlaubertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@slow
@require_torch_gpu
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
_UpperCamelCase = True
_UpperCamelCase = model_class(config=_A )
_UpperCamelCase = self._prepare_for_class(_A , _A )
_UpperCamelCase = torch.jit.trace(
_A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_A , os.path.join(_A , '''traced_model.pt''' ) )
_UpperCamelCase = torch.jit.load(os.path.join(_A , '''traced_model.pt''' ) , map_location=_A )
loaded(inputs_dict['''input_ids'''].to(_A ) , inputs_dict['''attention_mask'''].to(_A ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
_UpperCamelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
_UpperCamelCase = model(_A )[0]
_UpperCamelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _A )
_UpperCamelCase = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _A , atol=1e-4 ) )
| 10 | 1 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "efficientnet"
def __init__( self : List[str] , _A : int = 3 , _A : int = 600 , _A : float = 2.0 , _A : float = 3.1 , _A : int = 8 , _A : List[int] = [3, 3, 5, 3, 5, 5, 3] , _A : List[int] = [32, 16, 24, 40, 80, 112, 192] , _A : List[int] = [16, 24, 40, 80, 112, 192, 320] , _A : List[int] = [] , _A : List[int] = [1, 2, 2, 2, 1, 2, 1] , _A : List[int] = [1, 2, 2, 3, 3, 4, 1] , _A : List[int] = [1, 6, 6, 6, 6, 6, 6] , _A : float = 0.25 , _A : str = "swish" , _A : int = 2560 , _A : str = "mean" , _A : float = 0.02 , _A : float = 0.001 , _A : float = 0.99 , _A : float = 0.5 , _A : float = 0.2 , **_A : Dict , ):
super().__init__(**_A )
_UpperCamelCase = num_channels
_UpperCamelCase = image_size
_UpperCamelCase = width_coefficient
_UpperCamelCase = depth_coefficient
_UpperCamelCase = depth_divisor
_UpperCamelCase = kernel_sizes
_UpperCamelCase = in_channels
_UpperCamelCase = out_channels
_UpperCamelCase = depthwise_padding
_UpperCamelCase = strides
_UpperCamelCase = num_block_repeats
_UpperCamelCase = expand_ratios
_UpperCamelCase = squeeze_expansion_ratio
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dim
_UpperCamelCase = pooling_type
_UpperCamelCase = initializer_range
_UpperCamelCase = batch_norm_eps
_UpperCamelCase = batch_norm_momentum
_UpperCamelCase = dropout_rate
_UpperCamelCase = drop_connect_rate
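        # num_hidden_layers: each repeated block is counted as four sub-layers (an assumption: expand, depthwise, squeeze-excite, project)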
_UpperCamelCase = sum(_A ) * 4
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = version.parse("1.11" )
@property
def UpperCamelCase_ ( self : Optional[int] ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def UpperCamelCase_ ( self : str ):
return 1e-5
| 10 | from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
def __init__( self : Any , _A : int , _A : int=12 , _A : int=7 , _A : Tuple=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : str=99 , _A : str=32 , _A : int=32 , _A : Optional[Any]=2 , _A : Dict=4 , _A : int=37 , _A : List[Any]=0.1 , _A : str=0.1 , _A : Any=512 , _A : int=0.02 , _A : Optional[Any]=0 , _A : Dict=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = projection_dim
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
_UpperCamelCase = bos_token_id
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
_UpperCamelCase = input_mask.numpy()
_UpperCamelCase , _UpperCamelCase = input_mask.shape
_UpperCamelCase = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_A ):
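                # keep attention on a random-length prefix of each row and mask out the tail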
_UpperCamelCase = 1
_UpperCamelCase = 0
_UpperCamelCase = self.get_config()
return config, input_ids, tf.convert_to_tensor(_A )
def UpperCamelCase_ ( self : str ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def UpperCamelCase_ ( self : List[str] , _A : Tuple , _A : str , _A : Optional[Any] ):
_UpperCamelCase = TFBlipTextModel(config=_A )
_UpperCamelCase = model(_A , attention_mask=_A , training=_A )
_UpperCamelCase = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = (TFBlipTextModel,) if is_tf_available() else ()
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = BlipTextModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCamelCase_ ( self : Dict ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCamelCase_ ( self : List[Any] ):
pass
def UpperCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : List[str] ):
pass
@slow
def UpperCamelCase_ ( self : Optional[int] ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFBlipTextModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def UpperCamelCase_ ( self : int , _A : Optional[int]=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=_A )
| 10 | 1 |
def _snake_case ( __snake_case = 10**12 ):
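    # Iterates a Pell-style recurrence over successive exact arrangements and returns the
    # blue-disc count for the first arrangement whose total exceeds min_total
    # (this matches Project Euler problem 100).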
_UpperCamelCase = 1
_UpperCamelCase = 0
_UpperCamelCase = 1
_UpperCamelCase = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(f'{solution() = }')
| 10 | from __future__ import annotations
_lowerCAmelCase = [True] * 1_000_001
_lowerCAmelCase = 2
while i * i <= 1_000_000:
if seive[i]:
for j in range(i * i, 1_000_001, i):
_lowerCAmelCase = False
i += 1
def _snake_case ( __snake_case ):
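    # O(1) primality lookup against the precomputed sieve above.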
return seive[n]
def _snake_case ( __snake_case ):
return any(digit in '''02468''' for digit in str(__snake_case ) )
def _snake_case ( __snake_case = 1000000 ):
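    # Collect circular primes below the limit: a number qualifies only if every
    # rotation of its digits is itself prime (Project Euler problem 35).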
_UpperCamelCase = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(__snake_case ) and not contains_an_even_digit(__snake_case ):
_UpperCamelCase = str(__snake_case )
_UpperCamelCase = [int(str_num[j:] + str_num[:j] ) for j in range(len(__snake_case ) )]
if all(is_prime(__snake_case ) for i in list_nums ):
result.append(__snake_case )
return result
def _snake_case ( ):
return len(find_circular_primes() )
if __name__ == "__main__":
print(f'{len(find_circular_primes()) = }')
| 10 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : int , _A : TransformeraDModel , _A : AutoencoderKL , _A : KarrasDiffusionSchedulers , _A : Optional[Dict[int, str]] = None , ):
super().__init__()
self.register_modules(transformer=_A , vae=_A , scheduler=_A )
        # create an ImageNet label -> id dictionary for easier use
_UpperCamelCase = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(''',''' ):
_UpperCamelCase = int(_A )
_UpperCamelCase = dict(sorted(self.labels.items() ) )
def UpperCamelCase_ ( self : str , _A : Union[str, List[str]] ):
if not isinstance(_A , _A ):
_UpperCamelCase = list(_A )
for l in label:
if l not in self.labels:
raise ValueError(
F"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : Any , _A : List[int] , _A : float = 4.0 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : int = 50 , _A : Optional[str] = "pil" , _A : bool = True , ):
_UpperCamelCase = len(_A )
_UpperCamelCase = self.transformer.config.sample_size
_UpperCamelCase = self.transformer.config.in_channels
_UpperCamelCase = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_A , device=self.device , dtype=self.transformer.dtype , )
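        # duplicate the latents so the conditional and unconditional passes run in a single batch
        # when classifier-free guidance is enabled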
_UpperCamelCase = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
_UpperCamelCase = torch.tensor(_A , device=self.device ).reshape(-1 )
_UpperCamelCase = torch.tensor([1000] * batch_size , device=self.device )
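        # label 1000 (one past the ImageNet classes) serves as the null class for the unconditional branch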
_UpperCamelCase = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_A )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
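                # the latents were duplicated for guidance, so keep only the first half and
                # re-duplicate it to keep both branches in sync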
_UpperCamelCase = latent_model_input[: len(_A ) // 2]
_UpperCamelCase = torch.cat([half, half] , dim=0 )
_UpperCamelCase = self.scheduler.scale_model_input(_A , _A )
_UpperCamelCase = t
if not torch.is_tensor(_A ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
_UpperCamelCase = latent_model_input.device.type == '''mps'''
if isinstance(_A , _A ):
                    _UpperCamelCase = torch.float32 if is_mps else torch.float64
                else:
                    _UpperCamelCase = torch.int32 if is_mps else torch.int64
_UpperCamelCase = torch.tensor([timesteps] , dtype=_A , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
_UpperCamelCase = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_UpperCamelCase = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
_UpperCamelCase = self.transformer(
_A , timestep=_A , class_labels=_A ).sample
# perform guidance
if guidance_scale > 1:
_UpperCamelCase , _UpperCamelCase = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
_UpperCamelCase , _UpperCamelCase = torch.split(_A , len(_A ) // 2 , dim=0 )
_UpperCamelCase = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
_UpperCamelCase = torch.cat([half_eps, half_eps] , dim=0 )
_UpperCamelCase = torch.cat([eps, rest] , dim=1 )
            # learned sigma: split off the extra predicted-variance channels before the scheduler step
if self.transformer.config.out_channels // 2 == latent_channels:
_UpperCamelCase , _UpperCamelCase = torch.split(_A , _A , dim=1 )
else:
_UpperCamelCase = noise_pred
# compute previous image: x_t -> x_t-1
_UpperCamelCase = self.scheduler.step(_A , _A , _A ).prev_sample
if guidance_scale > 1:
_UpperCamelCase , _UpperCamelCase = latent_model_input.chunk(2 , dim=0 )
else:
_UpperCamelCase = latent_model_input
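        # undo the VAE scaling factor before decoding the latents to pixel space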
_UpperCamelCase = 1 / self.vae.config.scaling_factor * latents
_UpperCamelCase = self.vae.decode(_A ).sample
_UpperCamelCase = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_UpperCamelCase = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_UpperCamelCase = self.numpy_to_pil(_A )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_A )
| 10 | import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = DebertaVaTokenizer
UpperCAmelCase = DebertaVaTokenizerFast
UpperCAmelCase = True
UpperCAmelCase = True
def UpperCamelCase_ ( self : List[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase = DebertaVaTokenizer(_A , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Dict , _A : Union[str, Any] ):
_UpperCamelCase = '''this is a test'''
_UpperCamelCase = '''this is a test'''
return input_text, output_text
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(_A ) , 3_0001 )
def UpperCamelCase_ ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def UpperCamelCase_ ( self : List[str] ):
# fmt: off
_UpperCamelCase = ''' \tHeLLo!how \n Are yoU? '''
_UpperCamelCase = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase_ ( self : Optional[Any] ):
pass
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[Any] ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : int ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Tuple ):
# fmt: off
_UpperCamelCase = ''' \tHeLLo!how \n Are yoU? '''
_UpperCamelCase = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = tokenizer.encode(_A )
_UpperCamelCase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = '''This is a test'''
_UpperCamelCase = [13, 1, 4398, 25, 21, 1289]
_UpperCamelCase = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
_UpperCamelCase = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
_UpperCamelCase = DebertaVaTokenizer(_A , keep_accents=_A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , keep_accents=_A )
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
_UpperCamelCase = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = DebertaVaTokenizer(_A )
_UpperCamelCase = tokenizer.encode('''sequence builders''' )
_UpperCamelCase = tokenizer.encode('''multi-sequence build''' )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_A )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_A , _A )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _A )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _A , )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
# fmt: off
_UpperCamelCase = {'''input_ids''': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 10 | 1 |
def _snake_case ( __snake_case ):
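    # Check that all characters in the string are distinct by setting one bit per Unicode code point.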
_UpperCamelCase = 0
for ch in input_str:
_UpperCamelCase = ord(__snake_case )
_UpperCamelCase = pow(2 , __snake_case )
        # If the bit for this character's Unicode code point is already set, it's a duplicate
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | import sys
from collections import defaultdict
class lowerCAmelCase_ :
def __init__( self : Optional[int] ):
_UpperCamelCase = []
def UpperCamelCase_ ( self : Any , _A : str ):
return self.node_position[vertex]
def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] , _A : Union[str, Any] ):
_UpperCamelCase = pos
def UpperCamelCase_ ( self : Any , _A : List[str] , _A : int , _A : Optional[Any] , _A : Union[str, Any] ):
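        # Sift-down: swap the node at `start` with its smaller child until the min-heap
        # property holds, keeping the position table in sync.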
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
_UpperCamelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
_UpperCamelCase = 2 * start + 1
else:
_UpperCamelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
_UpperCamelCase , _UpperCamelCase = heap[smallest_child], positions[smallest_child]
_UpperCamelCase , _UpperCamelCase = (
heap[start],
positions[start],
)
_UpperCamelCase , _UpperCamelCase = temp, tempa
_UpperCamelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , _A )
self.top_to_bottom(_A , _A , _A , _A )
def UpperCamelCase_ ( self : List[str] , _A : Tuple , _A : Optional[Any] , _A : int , _A : Optional[int] ):
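        # Sift-up: move the value at `index` toward the root while it is smaller than its parent.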
_UpperCamelCase = position[index]
while index != 0:
_UpperCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
_UpperCamelCase = heap[parent]
_UpperCamelCase = position[parent]
self.set_position(position[parent] , _A )
else:
_UpperCamelCase = val
_UpperCamelCase = temp
self.set_position(_A , _A )
break
_UpperCamelCase = parent
else:
_UpperCamelCase = val
_UpperCamelCase = temp
self.set_position(_A , 0 )
def UpperCamelCase_ ( self : int , _A : Tuple , _A : int ):
_UpperCamelCase = len(_A ) // 2 - 1
for i in range(_A , -1 , -1 ):
self.top_to_bottom(_A , _A , len(_A ) , _A )
def UpperCamelCase_ ( self : Any , _A : int , _A : List[str] ):
_UpperCamelCase = positions[0]
_UpperCamelCase = sys.maxsize
self.top_to_bottom(_A , 0 , len(_A ) , _A )
return temp
def _snake_case ( __snake_case ):
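    # Prim's algorithm: starting from vertex 0, repeatedly pull the cheapest edge to an
    # unexplored vertex off the min-heap and add it to the spanning tree.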
_UpperCamelCase = Heap()
_UpperCamelCase = [0] * len(__snake_case )
_UpperCamelCase = [-1] * len(__snake_case ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
_UpperCamelCase = [] # Heap of Distance of vertices from their neighboring vertex
_UpperCamelCase = []
for vertex in range(len(__snake_case ) ):
distance_tv.append(sys.maxsize )
positions.append(__snake_case )
heap.node_position.append(__snake_case )
_UpperCamelCase = []
_UpperCamelCase = 1
_UpperCamelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
_UpperCamelCase = 0
_UpperCamelCase = distance
heap.heapify(__snake_case , __snake_case )
for _ in range(1 , len(__snake_case ) ):
_UpperCamelCase = heap.delete_minimum(__snake_case , __snake_case )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
_UpperCamelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(__snake_case )]
):
_UpperCamelCase = distance
heap.bottom_to_top(
__snake_case , heap.get_position(__snake_case ) , __snake_case , __snake_case )
_UpperCamelCase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
    # < --------- Prim's Algorithm --------- >
_lowerCAmelCase = int(input("Enter number of edges: ").strip())
_lowerCAmelCase = defaultdict(list)
for _ in range(edges_number):
_lowerCAmelCase = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 10 | 1 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : Union[str, Any] , _A : Optional[Any] , _A : Any=13 , _A : Union[str, Any]=7 , _A : List[str]=True , _A : List[str]=True , _A : List[str]=True , _A : List[str]=True , _A : List[Any]=True , _A : Optional[int]=False , _A : Any=False , _A : int=False , _A : Optional[Any]=2 , _A : Any=99 , _A : str=0 , _A : Union[str, Any]=32 , _A : List[Any]=5 , _A : Tuple=4 , _A : List[str]=0.1 , _A : Union[str, Any]=0.1 , _A : int=512 , _A : Union[str, Any]=12 , _A : List[str]=2 , _A : int=0.02 , _A : Optional[Any]=3 , _A : Any=4 , _A : Optional[int]="last" , _A : Any=None , _A : Dict=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_lengths
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = gelu_activation
_UpperCamelCase = sinusoidal_embeddings
_UpperCamelCase = causal
_UpperCamelCase = asm
_UpperCamelCase = n_langs
_UpperCamelCase = vocab_size
_UpperCamelCase = n_special
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = summary_type
_UpperCamelCase = use_proj
_UpperCamelCase = scope
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_input_lengths:
_UpperCamelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , 2 ).float()
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase_ ( self : str ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCamelCase_ ( self : str , _A : Union[str, Any] , _A : Optional[Any] , _A : str , _A : Tuple , _A : List[str] , _A : List[Any] , _A : Any , _A : str , _A : Optional[int] , ):
_UpperCamelCase = FlaubertModel(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , lengths=_A , langs=_A )
_UpperCamelCase = model(_A , langs=_A )
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple , _A : List[Any] , _A : str , _A : Optional[int] , _A : Optional[Any] , _A : List[str] , _A : int , _A : str , _A : List[Any] , _A : Any , ):
_UpperCamelCase = FlaubertWithLMHeadModel(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Tuple , _A : List[str] , _A : List[str] , _A : Optional[Any] , _A : Union[str, Any] , _A : str , _A : List[str] , _A : Tuple , _A : Optional[int] , _A : Dict , ):
_UpperCamelCase = FlaubertForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A , start_positions=_A , end_positions=_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : Tuple , _A : str , _A : Tuple , _A : Tuple , _A : Union[str, Any] , _A : List[str] , _A : int , _A : str , _A : Dict , _A : List[Any] , ):
_UpperCamelCase = FlaubertForQuestionAnswering(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
_UpperCamelCase = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
((_UpperCamelCase) , ) = result_with_labels.to_tuple()
_UpperCamelCase = model(_A , start_positions=_A , end_positions=_A )
((_UpperCamelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCamelCase_ ( self : List[Any] , _A : Union[str, Any] , _A : Tuple , _A : str , _A : int , _A : int , _A : Optional[int] , _A : Optional[int] , _A : int , _A : List[str] , ):
_UpperCamelCase = FlaubertForSequenceClassification(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self : Optional[int] , _A : List[str] , _A : Optional[Any] , _A : str , _A : Union[str, Any] , _A : List[Any] , _A : int , _A : List[Any] , _A : str , _A : List[str] , ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = FlaubertForTokenClassification(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Tuple , _A : Dict , _A : str , _A : Optional[Any] , _A : List[str] , _A : Any , _A : Optional[int] , _A : Optional[Any] , _A : List[Any] , _A : List[str] , ):
_UpperCamelCase = self.num_choices
_UpperCamelCase = FlaubertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = config_and_inputs
_UpperCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase_ ( self : Union[str, Any] , _A : Dict , _A : Dict , _A : Tuple , _A : int , _A : Any ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCamelCase_ ( self : str , _A : Any , _A : List[str] , _A : Optional[int]=False ):
_UpperCamelCase = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = FlaubertModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , emb_dim=37 )
def UpperCamelCase_ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_A )
@slow
def UpperCamelCase_ ( self : str ):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = FlaubertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@slow
@require_torch_gpu
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
_UpperCamelCase = True
_UpperCamelCase = model_class(config=_A )
_UpperCamelCase = self._prepare_for_class(_A , _A )
_UpperCamelCase = torch.jit.trace(
_A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_A , os.path.join(_A , '''traced_model.pt''' ) )
_UpperCamelCase = torch.jit.load(os.path.join(_A , '''traced_model.pt''' ) , map_location=_A )
loaded(inputs_dict['''input_ids'''].to(_A ) , inputs_dict['''attention_mask'''].to(_A ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
_UpperCamelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
_UpperCamelCase = model(_A )[0]
_UpperCamelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _A )
_UpperCamelCase = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _A , atol=1e-4 ) )
| 10 | import logging
import os
from .state import PartialState
class lowerCAmelCase_ ( logging.LoggerAdapter ):
@staticmethod
def UpperCamelCase_ ( _A : Any ):
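        # Decide whether this process should emit the record: always when
        # main_process_only is False, otherwise only on the main process.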
_UpperCamelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[Any] , _A : str , *_A : int , **_A : List[Any] ):
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
_UpperCamelCase = kwargs.pop('''main_process_only''' , _A )
_UpperCamelCase = kwargs.pop('''in_order''' , _A )
if self.isEnabledFor(_A ):
if self._should_log(_A ):
_UpperCamelCase , _UpperCamelCase = self.process(_A , _A )
self.logger.log(_A , _A , *_A , **_A )
elif in_order:
_UpperCamelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_UpperCamelCase , _UpperCamelCase = self.process(_A , _A )
self.logger.log(_A , _A , *_A , **_A )
state.wait_for_everyone()
def _snake_case ( __snake_case , __snake_case = None ):
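    # Build a process-aware logger; when no level is passed, fall back to the
    # ACCELERATE_LOG_LEVEL environment variable.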
if log_level is None:
_UpperCamelCase = os.environ.get('''ACCELERATE_LOG_LEVEL''' , __snake_case )
_UpperCamelCase = logging.getLogger(__snake_case )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(__snake_case , {} )
| 10 | 1 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
_lowerCAmelCase = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1_000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
_lowerCAmelCase = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1_000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
_lowerCAmelCase = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
_lowerCAmelCase = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
_lowerCAmelCase = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
_lowerCAmelCase = {
"num_train_timesteps": 151,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
def _snake_case ( __snake_case ):
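    # argparse-friendly boolean parser: accept the usual string spellings of true/false.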
if isinstance(__snake_case , __snake_case ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('''boolean value expected''' )
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=False ):
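    # Remap one original ResBlock (in_layers / emb_layers / out_layers) onto the diffusers
    # resnet key layout; when has_skip is set, the skip-connection conv is copied as well.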
_UpperCamelCase = checkpoint[f"""{old_prefix}.in_layers.0.weight"""]
_UpperCamelCase = checkpoint[f"""{old_prefix}.in_layers.0.bias"""]
_UpperCamelCase = checkpoint[f"""{old_prefix}.in_layers.2.weight"""]
_UpperCamelCase = checkpoint[f"""{old_prefix}.in_layers.2.bias"""]
_UpperCamelCase = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""]
_UpperCamelCase = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""]
_UpperCamelCase = checkpoint[f"""{old_prefix}.out_layers.0.weight"""]
_UpperCamelCase = checkpoint[f"""{old_prefix}.out_layers.0.bias"""]
_UpperCamelCase = checkpoint[f"""{old_prefix}.out_layers.3.weight"""]
_UpperCamelCase = checkpoint[f"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
_UpperCamelCase = checkpoint[f"""{old_prefix}.skip_connection.weight"""]
_UpperCamelCase = checkpoint[f"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=None ):
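    # Split the original fused qkv conv into separate q/k/v projections and squeeze the
    # trailing 1x1 conv axes down to linear-layer shapes.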
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
_UpperCamelCase = checkpoint[f"""{old_prefix}.norm.weight"""]
_UpperCamelCase = checkpoint[f"""{old_prefix}.norm.bias"""]
_UpperCamelCase = weight_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase = bias_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase = weight_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase = bias_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase = weight_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase = bias_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase = (
checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
_UpperCamelCase = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def _snake_case ( __snake_case , __snake_case ):
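    # Walk the original checkpoint block by block (down blocks, mid block, up blocks)
    # and rebuild it under the diffusers UNet state-dict naming.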
_UpperCamelCase = torch.load(__snake_case , map_location='''cpu''' )
_UpperCamelCase = {}
_UpperCamelCase = checkpoint['''time_embed.0.weight''']
_UpperCamelCase = checkpoint['''time_embed.0.bias''']
_UpperCamelCase = checkpoint['''time_embed.2.weight''']
_UpperCamelCase = checkpoint['''time_embed.2.bias''']
if unet_config["num_class_embeds"] is not None:
_UpperCamelCase = checkpoint['''label_emb.weight''']
_UpperCamelCase = checkpoint['''input_blocks.0.0.weight''']
_UpperCamelCase = checkpoint['''input_blocks.0.0.bias''']
_UpperCamelCase = unet_config['''down_block_types''']
_UpperCamelCase = unet_config['''layers_per_block''']
_UpperCamelCase = unet_config['''attention_head_dim''']
_UpperCamelCase = unet_config['''block_out_channels''']
_UpperCamelCase = 1
_UpperCamelCase = channels_list[0]
for i, layer_type in enumerate(__snake_case ):
_UpperCamelCase = channels_list[i]
_UpperCamelCase = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(__snake_case ):
_UpperCamelCase = f"""down_blocks.{i}.resnets.{j}"""
_UpperCamelCase = f"""input_blocks.{current_layer}.0"""
                _UpperCamelCase = j == 0 and downsample_block_has_skip
_UpperCamelCase = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case , has_skip=__snake_case )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(__snake_case ):
_UpperCamelCase = f"""down_blocks.{i}.resnets.{j}"""
_UpperCamelCase = f"""input_blocks.{current_layer}.0"""
                _UpperCamelCase = j == 0 and downsample_block_has_skip
_UpperCamelCase = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case , has_skip=__snake_case )
_UpperCamelCase = f"""down_blocks.{i}.attentions.{j}"""
_UpperCamelCase = f"""input_blocks.{current_layer}.1"""
_UpperCamelCase = convert_attention(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
current_layer += 1
if i != len(__snake_case ) - 1:
_UpperCamelCase = f"""down_blocks.{i}.downsamplers.0"""
_UpperCamelCase = f"""input_blocks.{current_layer}.0"""
_UpperCamelCase = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case )
current_layer += 1
_UpperCamelCase = current_channels
# hardcoded the mid-block for now
_UpperCamelCase = '''mid_block.resnets.0'''
_UpperCamelCase = '''middle_block.0'''
_UpperCamelCase = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case )
_UpperCamelCase = '''mid_block.attentions.0'''
_UpperCamelCase = '''middle_block.1'''
_UpperCamelCase = convert_attention(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
_UpperCamelCase = '''mid_block.resnets.1'''
_UpperCamelCase = '''middle_block.2'''
_UpperCamelCase = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case )
_UpperCamelCase = 0
_UpperCamelCase = unet_config['''up_block_types''']
for i, layer_type in enumerate(__snake_case ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase = f"""up_blocks.{i}.resnets.{j}"""
_UpperCamelCase = f"""output_blocks.{current_layer}.0"""
_UpperCamelCase = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case , has_skip=__snake_case )
current_layer += 1
if i != len(__snake_case ) - 1:
_UpperCamelCase = f"""up_blocks.{i}.upsamplers.0"""
_UpperCamelCase = f"""output_blocks.{current_layer-1}.1"""
_UpperCamelCase = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase = f"""up_blocks.{i}.resnets.{j}"""
_UpperCamelCase = f"""output_blocks.{current_layer}.0"""
_UpperCamelCase = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case , has_skip=__snake_case )
_UpperCamelCase = f"""up_blocks.{i}.attentions.{j}"""
_UpperCamelCase = f"""output_blocks.{current_layer}.1"""
_UpperCamelCase = convert_attention(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
current_layer += 1
if i != len(__snake_case ) - 1:
_UpperCamelCase = f"""up_blocks.{i}.upsamplers.0"""
_UpperCamelCase = f"""output_blocks.{current_layer-1}.2"""
_UpperCamelCase = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case )
_UpperCamelCase = checkpoint['''out.0.weight''']
_UpperCamelCase = checkpoint['''out.0.bias''']
_UpperCamelCase = checkpoint['''out.2.weight''']
_UpperCamelCase = checkpoint['''out.2.bias''']
return new_checkpoint
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
_lowerCAmelCase = parser.parse_args()
_lowerCAmelCase = strabool(args.class_cond)
_lowerCAmelCase = os.path.basename(args.unet_path)
print(f'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
_lowerCAmelCase = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_lowerCAmelCase = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
_lowerCAmelCase = TEST_UNET_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
_lowerCAmelCase = None
_lowerCAmelCase = con_pt_to_diffuser(args.unet_path, unet_config)
_lowerCAmelCase = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
_lowerCAmelCase = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
_lowerCAmelCase = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_lowerCAmelCase = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
_lowerCAmelCase = CMStochasticIterativeScheduler(**scheduler_config)
_lowerCAmelCase = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 10 | import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = "▁"
_lowerCAmelCase = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = BertGenerationTokenizer
UpperCAmelCase = False
UpperCAmelCase = True
def UpperCamelCase_ ( self : List[str] ):
super().setUp()
_UpperCamelCase = BertGenerationTokenizer(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = '''<s>'''
_UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(_A ) , 1002 )
def UpperCamelCase_ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = BertGenerationTokenizer(_A , keep_accents=_A )
_UpperCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [285, 46, 10, 170, 382] , )
_UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_UpperCamelCase = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def UpperCamelCase_ ( self : Union[str, Any] ):
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''Hello World!'''
_UpperCamelCase = [1_8536, 2260, 101]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
_UpperCamelCase = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def UpperCamelCase_ ( self : Dict ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
_UpperCamelCase = list(self.big_tokenizer.get_vocab().keys() )[:10]
_UpperCamelCase = ''' '''.join(_A )
_UpperCamelCase = self.big_tokenizer.encode_plus(_A , return_tensors='''pt''' , return_token_type_ids=_A )
_UpperCamelCase = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_A )
_UpperCamelCase = BertGenerationConfig()
_UpperCamelCase = BertGenerationEncoder(_A )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_A )
model(**_A )
@slow
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = {'''input_ids''': [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
| 10 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCAmelCase = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["ViTFeatureExtractor"]
_lowerCAmelCase = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
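# A minimal sketch of what the `_LazyModule` indirection above provides: importing
# the package stays cheap, and backend-specific classes resolve only on first
# attribute access. Illustrative only; assumes the torch and vision backends are
# installed:
#
#     from transformers import ViTConfig, ViTModel  # ViTModel is resolved lazily here
#     model = ViTModel(ViTConfig())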
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCAmelCase_ ( __lowercase, __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = StableUnCLIPPipeline
UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
UpperCAmelCase = False
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = 32
_UpperCamelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=_A , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_A , num_layers=1 , )
torch.manual_seed(0 )
_UpperCamelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=_A , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
_UpperCamelCase = StableUnCLIPImageNormalizer(embedding_dim=_A )
_UpperCamelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_A , layers_per_block=1 , upcast_attention=_A , use_linear_projection=_A , )
torch.manual_seed(0 )
_UpperCamelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_A , steps_offset=1 , )
torch.manual_seed(0 )
_UpperCamelCase = AutoencoderKL()
_UpperCamelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Dict=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_A )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_A )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
        _UpperCamelCase = pipe('''anime turtle''' , generator=_A , output_type='''np''' )
_UpperCamelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
_UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."} )
UpperCAmelCase = field(
default="tab_fact", metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}, )
UpperCAmelCase = field(
default=1024, metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "A csv or a json file containing the training data."} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "A csv or a json file containing the validation data."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "A csv or a json file containing the test data."} )
def UpperCamelCase_ ( self : Optional[Any] ):
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
_UpperCamelCase = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
_UpperCamelCase = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
UpperCAmelCase = field(
default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
}, )
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
_UpperCamelCase = training_args.get_process_log_level()
logger.setLevel(__snake_case )
datasets.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
_UpperCamelCase = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
_UpperCamelCase = data_args.train_file.split('''.''' )[-1]
_UpperCamelCase = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
_UpperCamelCase = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
_UpperCamelCase = load_dataset('''csv''' , data_files=__snake_case , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
_UpperCamelCase = load_dataset('''json''' , data_files=__snake_case , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
_UpperCamelCase = raw_datasets['''train'''].features['''label'''].names
_UpperCamelCase = len(__snake_case )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
_UpperCamelCase = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__snake_case , )
_UpperCamelCase = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
_UpperCamelCase = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_UpperCamelCase = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
_UpperCamelCase = {'''Refused''': 0, '''Entailed''': 1}
_UpperCamelCase = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
_UpperCamelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(__snake_case ):
# Tokenize the texts
def _convert_table_text_to_pandas(__snake_case ):
_UpperCamelCase = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
_UpperCamelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
_UpperCamelCase = examples['''statement''']
_UpperCamelCase = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
_UpperCamelCase = tokenizer(__snake_case , __snake_case , padding=__snake_case , max_length=__snake_case , truncation=__snake_case )
_UpperCamelCase = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
_UpperCamelCase = raw_datasets.map(
__snake_case , batched=__snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
_UpperCamelCase = raw_datasets['''train''']
if data_args.max_train_samples is not None:
_UpperCamelCase = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
_UpperCamelCase = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
_UpperCamelCase = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
_UpperCamelCase = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
_UpperCamelCase = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__snake_case ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__snake_case ):
_UpperCamelCase = p.predictions[0] if isinstance(p.predictions , __snake_case ) else p.predictions
_UpperCamelCase = np.argmax(__snake_case , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_UpperCamelCase = default_data_collator
elif training_args.fpaa:
_UpperCamelCase = DataCollatorWithPadding(__snake_case , pad_to_multiple_of=8 )
else:
_UpperCamelCase = None
# Initialize our Trainer
_UpperCamelCase = Trainer(
model=__snake_case , args=__snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__snake_case , tokenizer=__snake_case , data_collator=__snake_case , )
# Training
if training_args.do_train:
_UpperCamelCase = None
if training_args.resume_from_checkpoint is not None:
_UpperCamelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCamelCase = last_checkpoint
_UpperCamelCase = trainer.train(resume_from_checkpoint=__snake_case )
_UpperCamelCase = train_result.metrics
_UpperCamelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__snake_case )
)
_UpperCamelCase = min(__snake_case , len(__snake_case ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , __snake_case )
trainer.save_metrics('''train''' , __snake_case )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate(eval_dataset=__snake_case )
_UpperCamelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__snake_case )
_UpperCamelCase = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''eval''' , __snake_case )
trainer.save_metrics('''eval''' , __snake_case )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
_UpperCamelCase = predict_dataset.remove_columns('''label''' )
_UpperCamelCase = trainer.predict(__snake_case , metric_key_prefix='''predict''' ).predictions
_UpperCamelCase = np.argmax(__snake_case , axis=1 )
_UpperCamelCase = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(__snake_case , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(__snake_case ):
_UpperCamelCase = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
_UpperCamelCase = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**__snake_case )
else:
trainer.create_model_card(**__snake_case )
def _snake_case ( __snake_case ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
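# Hedged usage sketch: one typical invocation of this TAPEX/TabFact fine-tuning
# script. The script filename and model id below are illustrative assumptions,
# not taken from this file:
#
#   python run_tabfact.py \
#       --model_name_or_path microsoft/tapex-base \
#       --dataset_name tab_fact --dataset_config_name tab_fact \
#       --do_train --do_eval \
#       --output_dir ./tapex-tabfact-out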
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatypes... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)
    answer = []
    for value in value_array:
        # Track the closest vector seen so far for this query value.
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
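

def _demo_similarity_search() -> None:
    # Hedged usage sketch; the arrays below are illustrative and not part of the
    # original module.
    dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    value_array = np.array([[0.9, 1.1]])
    # Nearest neighbour of (0.9, 1.1) is (1.0, 1.0); the second entry is its distance.
    print(similarity_search(dataset, value_array))
    # Parallel vectors have cosine similarity 1.0.
    print(cosine_similarity(np.array([1.0, 2.0]), np.array([2.0, 4.0])))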
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximate each small segment of the curve as linear and solve
        # for the trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
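

# The loop above implements the composite trapezoidal rule for the (unsigned)
# area between the curve and the x-axis: each segment contributes
# |f(x1) + f(x2)| * (x2 - x1) / 2. For a smooth integrand the error shrinks as
# O(1/steps**2), i.e. roughly two extra digits of accuracy per tenfold increase
# in `steps`, which is what the demo below illustrates.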
if __name__ == "__main__":
    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = ShapEPipeline
UpperCAmelCase = ["prompt"]
UpperCAmelCase = ["prompt"]
UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
UpperCAmelCase = False
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
return 32
@property
def UpperCamelCase_ ( self : int ):
return 32
@property
def UpperCamelCase_ ( self : List[str] ):
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return 8
@property
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCamelCase_ ( self : List[Any] ):
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCamelCase_ ( self : int ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
_UpperCamelCase = PriorTransformer(**_A )
return model
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
_UpperCamelCase = ShapERenderer(**_A )
return model
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.dummy_prior
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = self.dummy_tokenizer
_UpperCamelCase = self.dummy_renderer
_UpperCamelCase = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , )
_UpperCamelCase = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase_ ( self : Tuple , _A : Tuple , _A : Optional[int]=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = '''cpu'''
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = pipe(**self.get_dummy_inputs(_A ) )
_UpperCamelCase = output.images[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_UpperCamelCase = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self : Any ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = torch_device == '''cpu'''
_UpperCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_A , relax_max_difference=_A , )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = 1
_UpperCamelCase = 2
_UpperCamelCase = self.get_dummy_inputs(_A )
for key in inputs.keys():
if key in self.batch_params:
_UpperCamelCase = batch_size * [inputs[key]]
_UpperCamelCase = pipe(**_A , num_images_per_prompt=_A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
_UpperCamelCase = ShapEPipeline.from_pretrained('''openai/shap-e''' )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(0 )
_UpperCamelCase = pipe(
'''a shark''' , generator=_A , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_A , _A )
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase_ :
def __init__( self : int , _A : int , _A : Dict=13 , _A : Optional[Any]=32 , _A : List[str]=3 , _A : Tuple=4 , _A : Union[str, Any]=[10, 20, 30, 40] , _A : Optional[int]=[2, 2, 3, 2] , _A : Union[str, Any]=True , _A : str=True , _A : List[Any]=37 , _A : List[str]="gelu" , _A : str=10 , _A : Optional[Any]=0.02 , _A : Optional[Any]=["stage2", "stage3", "stage4"] , _A : Any=[2, 3, 4] , _A : Union[str, Any]=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = image_size
_UpperCamelCase = num_channels
_UpperCamelCase = num_stages
_UpperCamelCase = hidden_sizes
_UpperCamelCase = depths
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = num_labels
_UpperCamelCase = initializer_range
_UpperCamelCase = out_features
_UpperCamelCase = out_indices
_UpperCamelCase = scope
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self : int ):
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_A , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def UpperCamelCase_ ( self : Optional[Any] , _A : Optional[int] , _A : Union[str, Any] , _A : Dict ):
_UpperCamelCase = ConvNextModel(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCamelCase_ ( self : Optional[int] , _A : Optional[Any] , _A : Tuple , _A : int ):
_UpperCamelCase = ConvNextForImageClassification(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : List[str] , _A : Union[str, Any] , _A : Dict , _A : Any ):
_UpperCamelCase = ConvNextBackbone(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_UpperCamelCase = None
_UpperCamelCase = ConvNextBackbone(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = ConvNextModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def UpperCamelCase_ ( self : str ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self : str ):
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
def UpperCamelCase_ ( self : Union[str, Any] ):
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
def UpperCamelCase_ ( self : Optional[Any] ):
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
def UpperCamelCase_ ( self : Optional[int] ):
pass
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(_A )
_UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_A )
def UpperCamelCase_ ( self : List[str] ):
def check_hidden_states_output(_A : int , _A : Optional[int] , _A : Dict ):
_UpperCamelCase = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
_UpperCamelCase = model(**self._prepare_for_class(_A , _A ) )
_UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase = self.model_tester.num_stages
self.assertEqual(len(_A ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase = True
check_hidden_states_output(_A , _A , _A )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def UpperCamelCase_ ( self : List[str] ):
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = ConvNextModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def _snake_case ( ):
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self : int ):
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(_A )
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
# forward pass
with torch.no_grad():
_UpperCamelCase = model(**_A )
# verify the logits
_UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _A )
_UpperCamelCase = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase, __lowercase ):
UpperCAmelCase = (ConvNextBackbone,) if is_torch_available() else ()
UpperCAmelCase = ConvNextConfig
UpperCAmelCase = False
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = ConvNextModelTester(self )
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_lowerCAmelCase = HfApi()
_lowerCAmelCase = {}
# fmt: off
_lowerCAmelCase = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
_lowerCAmelCase = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
_lowerCAmelCase = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
_lowerCAmelCase = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
_lowerCAmelCase = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
_lowerCAmelCase = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
_lowerCAmelCase = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
_lowerCAmelCase = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
_lowerCAmelCase = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
_lowerCAmelCase = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
_lowerCAmelCase = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
_lowerCAmelCase = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
_lowerCAmelCase = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
_lowerCAmelCase = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
_lowerCAmelCase = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
_lowerCAmelCase = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_lowerCAmelCase = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f'Started running {mod.modelId}!!!')
if mod.modelId.startswith("CompVis"):
_lowerCAmelCase = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
_lowerCAmelCase = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_lowerCAmelCase = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_lowerCAmelCase = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_lowerCAmelCase = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1E-3
)
print(f'{mod.modelId} has passed successfully!!!')
def bfs(graph, s, t, parent):
    # Standard BFS: return True if the sink `t` is reachable from the source `s`;
    # `parent` records the discovered augmenting path.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS to store the augmenting path
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
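

# With BFS choosing the augmenting paths, this is the Edmonds-Karp variant of
# Ford-Fulkerson and runs in O(V * E^2). Note that `graph` doubles as the
# residual-capacity matrix, so the input matrix is mutated in place.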
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
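# For the capacity matrix above the expected output is 23, the maximum flow of
# this classic textbook example network.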
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        # Find and dispatch the handler registered for the pressed key, if any.
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    # Apply the KeyHandler metaclass to an existing class.
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
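

# Hedged usage sketch of the registry above; the class and key names are
# illustrative, not part of this module:
#
#   @register
#   class BulletMenu:
#       @mark("up")
#       def move_up(cls):
#           ...
#
#       @mark_multiple("down", "j")
#       def move_down(cls):
#           ...
#
# `BulletMenu.handle_input()` then reads one key press and dispatches it to the
# method marked with that key.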
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    keys = list(state_dict.keys())
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = 'first_stage_model.'
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, '')] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = 'model.diffusion_model.'
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, '')] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)
    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)
    # DDIMScheduler expects `num_train_timesteps`; a bare `timesteps` kwarg is not accepted.
    noise_scheduler = DDIMScheduler(
        num_train_timesteps=config.model.params.timesteps,
        beta_schedule='scaled_linear',
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )
    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
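# Hedged usage sketch (the script filename and paths are illustrative):
#
#   python convert_ldm_uncond.py \
#       --checkpoint_path ./ldm.ckpt \
#       --config_path ./ldm_config.yaml \
#       --output_path ./ldm_pipeline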
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class lowerCAmelCase_ ( unittest.TestCase ):
UpperCAmelCase = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
_UpperCamelCase = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
_A , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A , num_return_sequences=2 , return_tensors=_A )
self.assertEqual(
_A , [
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
] , )
_UpperCamelCase = text_generator.model.config.eos_token_id
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=_A , num_return_sequences=2 , batch_size=2 , return_tensors=_A , )
self.assertEqual(
_A , [
[
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
],
[
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
],
] , )
@require_tf
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
_UpperCamelCase = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=_A )
self.assertEqual(
_A , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def UpperCamelCase_ ( self : int , _A : str , _A : Union[str, Any] , _A : Any ):
_UpperCamelCase = TextGenerationPipeline(model=_A , tokenizer=_A )
return text_generator, ["This is a test", "Another test"]
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = '''Hello I believe in'''
_UpperCamelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
_UpperCamelCase = text_generator(_A )
self.assertEqual(
_A , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
_UpperCamelCase = text_generator(_A , stop_sequence=''' fe''' )
self.assertEqual(_A , [{'''generated_text''': '''Hello I believe in fe'''}] )
def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : Union[str, Any] ):
_UpperCamelCase = text_generator.model
_UpperCamelCase = text_generator.tokenizer
_UpperCamelCase = text_generator('''This is a test''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_UpperCamelCase = text_generator('''This is a test''' , return_full_text=_A )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_UpperCamelCase = pipeline(task='''text-generation''' , model=_A , tokenizer=_A , return_full_text=_A )
_UpperCamelCase = text_generator('''This is a test''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_UpperCamelCase = text_generator('''This is a test''' , return_full_text=_A )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_UpperCamelCase = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=_A )
self.assertEqual(
_A , [
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
] , )
if text_generator.tokenizer.pad_token is not None:
_UpperCamelCase = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=_A )
self.assertEqual(
_A , [
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
] , )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_full_text=_A , return_text=_A )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_full_text=_A , return_tensors=_A )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_text=_A , return_tensors=_A )
# Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
_UpperCamelCase = text_generator('''''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
_UpperCamelCase = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
_UpperCamelCase = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
_UpperCamelCase = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(_A ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase_ ( self : Optional[int] ):
import torch
# Classic `model_kwargs`
_UpperCamelCase = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else.)
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def UpperCamelCase_ ( self : Union[str, Any] ):
import torch
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase_ ( self : Optional[int] ):
import torch
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=_A , top_p=0.5 )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = '''Hello world'''
_UpperCamelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
_UpperCamelCase = logging.get_logger('''transformers.generation.tf_utils''' )
else:
_UpperCamelCase = logging.get_logger('''transformers.generation.utils''' )
_UpperCamelCase = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_length=10 , max_new_tokens=1 )
self.assertIn(_A , cl.out )
# The user only sets one -> no warning
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_new_tokens=1 )
self.assertNotIn(_A , cl.out )
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_length=10 )
self.assertNotIn(_A , cl.out )
| 10 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"vocab_file": "spiece.model"}
_lowerCAmelCase = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : Any , _A : Optional[int] , _A : Union[str, Any]=False , _A : Dict=True , _A : str=False , _A : int="<s>" , _A : Optional[Any]="</s>" , _A : Any="<unk>" , _A : List[Any]="<sep>" , _A : Any="<pad>" , _A : List[str]="<cls>" , _A : Tuple="<mask>" , _A : List[Any]=["<eop>", "<eod>"] , _A : Optional[Dict[str, Any]] = None , **_A : Optional[Any] , ):
_UpperCamelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
_UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A , remove_space=_A , keep_accents=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
_UpperCamelCase = 3
_UpperCamelCase = do_lower_case
_UpperCamelCase = remove_space
_UpperCamelCase = keep_accents
_UpperCamelCase = vocab_file
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
'''See https://pypi.org/project/jieba/ for installation.''' )
_UpperCamelCase = jieba
_UpperCamelCase = str.maketrans(''' \n''' , '''\u2582\u2583''' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def UpperCamelCase_ ( self : int ):
return len(self.sp_model )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ):
_UpperCamelCase = self.__dict__.copy()
_UpperCamelCase = None
return state
def __setstate__( self : str , _A : Optional[int] ):
_UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCamelCase = {}
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase_ ( self : Union[str, Any] , _A : Any ):
if self.remove_space:
_UpperCamelCase = ''' '''.join(inputs.strip().split() )
else:
_UpperCamelCase = inputs
_UpperCamelCase = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
_UpperCamelCase = unicodedata.normalize('''NFKD''' , _A )
_UpperCamelCase = ''''''.join([c for c in outputs if not unicodedata.combining(_A )] )
if self.do_lower_case:
_UpperCamelCase = outputs.lower()
return outputs
def UpperCamelCase_ ( self : Optional[Any] , _A : str ):
_UpperCamelCase = self.preprocess_text(_A )
_UpperCamelCase = self.sp_model.encode(_A , out_type=_A )
_UpperCamelCase = []
for piece in pieces:
if len(_A ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
_UpperCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(_A , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_UpperCamelCase = cur_pieces[1:]
else:
_UpperCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_A )
else:
new_pieces.append(_A )
return new_pieces
def UpperCamelCase_ ( self : List[str] , _A : Optional[Any] ):
return self.sp_model.PieceToId(_A )
def UpperCamelCase_ ( self : int , _A : Any ):
return self.sp_model.IdToPiece(_A )
def UpperCamelCase_ ( self : Optional[int] , _A : Optional[int] ):
_UpperCamelCase = ''''''.join(_A ).replace(_A , ''' ''' ).strip()
return out_string
def UpperCamelCase_ ( self : Tuple , _A : List[int] , _A : Optional[List[int]] = None ):
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCamelCase_ ( self : int , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is not None:
return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1, 1]
return ([0] * len(_A )) + [1, 1]
def UpperCamelCase_ ( self : List[Any] , _A : List[int] , _A : Optional[List[int]] = None ):
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCamelCase_ ( self : Union[str, Any] , _A : str , _A : Optional[str] = None ):
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCamelCase = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _A )
elif not os.path.isfile(self.vocab_file ):
with open(_A , '''wb''' ) as fi:
_UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
def UpperCamelCase_ ( self : Tuple , *_A : Any , **_A : str ):
_UpperCamelCase = super()._decode(*_A , **_A )
_UpperCamelCase = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
return text
| 10 | def _snake_case ( __snake_case = 100 ):
_UpperCamelCase = (n * (n + 1) // 2) ** 2
_UpperCamelCase = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
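# Brute-force cross-check of the closed forms used above (added for illustration,
# not part of the original snippet): the function relies on
# sum(i^3) = (n(n+1)/2)^2 and sum(i^2) = n(n+1)(2n+1)/6, i.e. it returns the
# square of the sum minus the sum of the squares (Project Euler #6).
def _brute_force_difference(n: int = 100) -> int:
    square_of_sum = sum(range(1, n + 1)) ** 2
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    return square_of_sum - sum_of_squares
assert _brute_force_difference(10) == 2640  # the well-known worked example for n = 10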
if __name__ == "__main__":
print(f'{solution() = }')
| 10 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "dpr"
def __init__( self : List[str] , _A : Optional[Any]=3_0522 , _A : int=768 , _A : List[Any]=12 , _A : Dict=12 , _A : Union[str, Any]=3072 , _A : Dict="gelu" , _A : int=0.1 , _A : Dict=0.1 , _A : Union[str, Any]=512 , _A : int=2 , _A : Any=0.02 , _A : Optional[Any]=1e-12 , _A : int=0 , _A : Optional[int]="absolute" , _A : int = 0 , **_A : Optional[int] , ):
super().__init__(pad_token_id=_A , **_A )
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = projection_dim
_UpperCamelCase = position_embedding_type
| 10 | import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_lowerCAmelCase = logging.get_logger(__name__)
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ):
def constraint_to_multiple_of(__snake_case , __snake_case , __snake_case=0 , __snake_case=None ):
_UpperCamelCase = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_UpperCamelCase = math.floor(val / multiple ) * multiple
if x < min_val:
_UpperCamelCase = math.ceil(val / multiple ) * multiple
return x
_UpperCamelCase = (output_size, output_size) if isinstance(__snake_case , __snake_case ) else output_size
_UpperCamelCase , _UpperCamelCase = get_image_size(__snake_case )
_UpperCamelCase , _UpperCamelCase = output_size
# determine new height and width
_UpperCamelCase = output_height / input_height
_UpperCamelCase = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_UpperCamelCase = scale_width
else:
# fit height
_UpperCamelCase = scale_height
_UpperCamelCase = constraint_to_multiple_of(scale_height * input_height , multiple=__snake_case )
_UpperCamelCase = constraint_to_multiple_of(scale_width * input_width , multiple=__snake_case )
return (new_height, new_width)
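# Worked example for the helper above (illustrative, not from the original file):
# for a 480x640 (height x width) image, a 384x384 target, keep_aspect_ratio=True
# and multiple=32, the scale closest to 1 is applied to both axes (384/480 = 0.8),
# giving round(0.8 * 480) = 384 and round(0.8 * 640) = 512, both multiples of 32,
# so the function returns (384, 512).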
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = ["pixel_values"]
def __init__( self : List[Any] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = False , _A : int = 1 , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : List[str] , ):
super().__init__(**_A )
_UpperCamelCase = size if size is not None else {'''height''': 384, '''width''': 384}
_UpperCamelCase = get_size_dict(_A )
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = keep_aspect_ratio
_UpperCamelCase = ensure_multiple_of
_UpperCamelCase = resample
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase_ ( self : List[str] , _A : np.ndarray , _A : Dict[str, int] , _A : bool = False , _A : int = 1 , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
_UpperCamelCase = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_UpperCamelCase = get_resize_output_image_size(
_A , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_A , multiple=_A , )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : str , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
return rescale(_A , scale=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : int , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : Optional[int] , _A : ImageInput , _A : bool = None , _A : int = None , _A : bool = None , _A : int = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ):
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(_A )
_UpperCamelCase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_UpperCamelCase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(_A ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(_A , _A ) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
def UpperCamelCase_ ( self : Any , _A : Any , _A : List[Tuple] = None ):
_UpperCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_A ) != len(_A ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_A ):
_UpperCamelCase = target_sizes.numpy()
_UpperCamelCase = []
for idx in range(len(_A ) ):
_UpperCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_A )
_UpperCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_A )
else:
_UpperCamelCase = logits.argmax(dim=1 )
_UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
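# Minimal usage sketch for the image processor above (hypothetical; assumes the
# obfuscated class/method names map back to the usual DPT image processor API):
#
#   from PIL import Image
#   processor = DPTImageProcessor(size={"height": 384, "width": 384})
#   inputs = processor(images=Image.new("RGB", (640, 480)), return_tensors="pt")
#   # inputs["pixel_values"].shape -> torch.Size([1, 3, 384, 384])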
| 10 | 1 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "segformer"
def __init__( self : Optional[Any] , _A : List[Any]=3 , _A : Tuple=4 , _A : Any=[2, 2, 2, 2] , _A : Dict=[8, 4, 2, 1] , _A : int=[32, 64, 160, 256] , _A : Any=[7, 3, 3, 3] , _A : Any=[4, 2, 2, 2] , _A : str=[1, 2, 5, 8] , _A : Optional[Any]=[4, 4, 4, 4] , _A : Dict="gelu" , _A : Optional[Any]=0.0 , _A : Any=0.0 , _A : Tuple=0.1 , _A : Optional[Any]=0.02 , _A : Dict=0.1 , _A : Dict=1e-6 , _A : int=256 , _A : Dict=255 , **_A : List[Any] , ):
super().__init__(**_A )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'''Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'''
''' removed, as the behaviour will default to that of reshape_last_stage = True.''' , _A , )
_UpperCamelCase = num_channels
_UpperCamelCase = num_encoder_blocks
_UpperCamelCase = depths
_UpperCamelCase = sr_ratios
_UpperCamelCase = hidden_sizes
_UpperCamelCase = patch_sizes
_UpperCamelCase = strides
_UpperCamelCase = mlp_ratios
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = classifier_dropout_prob
_UpperCamelCase = initializer_range
_UpperCamelCase = drop_path_rate
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = decoder_hidden_size
_UpperCamelCase = kwargs.get('''reshape_last_stage''' , _A )
_UpperCamelCase = semantic_loss_ignore_index
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = version.parse("1.11" )
@property
def UpperCamelCase_ ( self : Any ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def UpperCamelCase_ ( self : str ):
return 1e-4
@property
def UpperCamelCase_ ( self : int ):
return 12
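# Illustrative use of the configs above (hypothetical names; assumes the
# obfuscated classes map back to SegformerConfig / SegformerOnnxConfig):
#
#   config = SegformerConfig(num_encoder_blocks=4, depths=[2, 2, 2, 2])
#   onnx_config = SegformerOnnxConfig(config)
#   # onnx_config.inputs -> OrderedDict with a "pixel_values" entry
#   # validation atol -> 1e-4, default ONNX opset -> 12 (the properties above)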
| 10 | import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCAmelCase = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
_UpperCamelCase = self.diffusers_dir
shutil.copy(
os.path.join(_A , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def UpperCamelCase_ ( self : str , _A : List[str] , _A : Optional[Any] , _A : List[str] , _A : Optional[int]=None ):
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_UpperCamelCase = black.format_str(_A , mode=_A )
_UpperCamelCase = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(_A , '''w''' , newline='''\n''' ) as f:
f.write(_A )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_A ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_A )
with open(_A , '''r''' ) as f:
self.assertTrue(f.read() , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , _A , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , _A ) , )
# Copy consistency with a really long name
_UpperCamelCase = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , _A , _A ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , _A , overwrite_result=re.sub('''DDPM''' , '''Test''' , _A ) , )
| 10 | 1 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class lowerCAmelCase_ ( unittest.TestCase ):
UpperCAmelCase = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
_UpperCamelCase = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
_A , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A , num_return_sequences=2 , return_tensors=_A )
self.assertEqual(
_A , [
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
] , )
_UpperCamelCase = text_generator.model.config.eos_token_id
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=_A , num_return_sequences=2 , batch_size=2 , return_tensors=_A , )
self.assertEqual(
_A , [
[
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
],
[
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
],
] , )
@require_tf
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
_UpperCamelCase = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=_A )
self.assertEqual(
_A , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def UpperCamelCase_ ( self : int , _A : str , _A : Union[str, Any] , _A : Any ):
_UpperCamelCase = TextGenerationPipeline(model=_A , tokenizer=_A )
return text_generator, ["This is a test", "Another test"]
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = '''Hello I believe in'''
_UpperCamelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
_UpperCamelCase = text_generator(_A )
self.assertEqual(
_A , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
_UpperCamelCase = text_generator(_A , stop_sequence=''' fe''' )
self.assertEqual(_A , [{'''generated_text''': '''Hello I believe in fe'''}] )
def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : Union[str, Any] ):
_UpperCamelCase = text_generator.model
_UpperCamelCase = text_generator.tokenizer
_UpperCamelCase = text_generator('''This is a test''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_UpperCamelCase = text_generator('''This is a test''' , return_full_text=_A )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_UpperCamelCase = pipeline(task='''text-generation''' , model=_A , tokenizer=_A , return_full_text=_A )
_UpperCamelCase = text_generator('''This is a test''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_UpperCamelCase = text_generator('''This is a test''' , return_full_text=_A )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_UpperCamelCase = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=_A )
self.assertEqual(
_A , [
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
] , )
if text_generator.tokenizer.pad_token is not None:
_UpperCamelCase = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=_A )
self.assertEqual(
_A , [
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
] , )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_full_text=_A , return_text=_A )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_full_text=_A , return_tensors=_A )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_text=_A , return_tensors=_A )
# Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
_UpperCamelCase = text_generator('''''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
_UpperCamelCase = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
_UpperCamelCase = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
_UpperCamelCase = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(_A ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase_ ( self : Optional[int] ):
import torch
# Classic `model_kwargs`
_UpperCamelCase = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else.)
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def UpperCamelCase_ ( self : Union[str, Any] ):
import torch
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase_ ( self : Optional[int] ):
import torch
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=_A , top_p=0.5 )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = '''Hello world'''
_UpperCamelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
_UpperCamelCase = logging.get_logger('''transformers.generation.tf_utils''' )
else:
_UpperCamelCase = logging.get_logger('''transformers.generation.utils''' )
_UpperCamelCase = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_length=10 , max_new_tokens=1 )
self.assertIn(_A , cl.out )
# The user only sets one -> no warning
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_new_tokens=1 )
self.assertNotIn(_A , cl.out )
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_length=10 )
self.assertNotIn(_A , cl.out )
| 10 | import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_lowerCAmelCase = True
from torch.cuda.amp import autocast
_lowerCAmelCase = logging.getLogger(__name__)
def _snake_case ( __snake_case=None , __snake_case=None ):
return field(default_factory=lambda: default , metadata=__snake_case )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."} )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
UpperCAmelCase = field(
default=0.1, metadata={
"help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
}, )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."}, )
UpperCAmelCase = field(
default=0.0_5, metadata={
"help": (
"Propability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
)
}, )
UpperCAmelCase = field(default=0.0, metadata={"help": "The LayerDrop probability."} )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
UpperCAmelCase = field(
default="train+validation", metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "The number of processes to use for the preprocessing."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
}, )
UpperCAmelCase = list_field(
default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"], metadata={"help": "A list of characters to remove from the transcripts."}, )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = 42
UpperCAmelCase = True
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
def __call__( self : Union[str, Any] , _A : List[Dict[str, Union[List[int], torch.Tensor]]] ):
# split inputs and labels since they have to be of different lengths and need
# different padding methods
_UpperCamelCase = [{'''input_values''': feature['''input_values''']} for feature in features]
_UpperCamelCase = [{'''input_ids''': feature['''labels''']} for feature in features]
_UpperCamelCase = self.processor.pad(
_A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
_UpperCamelCase = self.processor.pad(
labels=_A , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , )
# replace padding with -100 to ignore loss correctly
_UpperCamelCase = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
_UpperCamelCase = labels
return batch
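# Sketch of the label handling above (illustration only): padded label positions
# are filled with -100 so the loss ignores them, e.g.
#
#   raw labels      : [[5, 9, 2], [5, 9, 2, 7, 1]]
#   padded + masked : [[5, 9, 2, -100, -100], [5, 9, 2, 7, 1]]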
class lowerCAmelCase_ ( __lowercase ):
def UpperCamelCase_ ( self : Dict , _A : nn.Module , _A : Dict[str, Union[torch.Tensor, Any]] ):
model.train()
_UpperCamelCase = self._prepare_inputs(_A )
if self.use_amp:
with autocast():
_UpperCamelCase = self.compute_loss(_A , _A )
else:
_UpperCamelCase = self.compute_loss(_A , _A )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
_UpperCamelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_UpperCamelCase = loss.sum() / (inputs['''labels'''] >= 0).sum()
else:
raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
_UpperCamelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(_A ).backward()
elif self.use_apex:
with amp.scale_loss(_A , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(_A )
else:
loss.backward()
return loss.detach()
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
_UpperCamelCase = datasets.load_dataset(
'''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
_UpperCamelCase = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
# Create and save tokenizer
_UpperCamelCase = f"""[{"".join(data_args.chars_to_ignore )}]"""
def remove_special_characters(__snake_case ):
_UpperCamelCase = re.sub(__snake_case , '''''' , batch['''sentence'''] ).lower() + ''' '''
return batch
_UpperCamelCase = train_dataset.map(__snake_case , remove_columns=['''sentence'''] )
_UpperCamelCase = eval_dataset.map(__snake_case , remove_columns=['''sentence'''] )
def extract_all_chars(__snake_case ):
_UpperCamelCase = ''' '''.join(batch['''text'''] )
_UpperCamelCase = list(set(__snake_case ) )
return {"vocab": [vocab], "all_text": [all_text]}
_UpperCamelCase = train_dataset.map(
__snake_case , batched=__snake_case , batch_size=-1 , keep_in_memory=__snake_case , remove_columns=train_dataset.column_names , )
_UpperCamelCase = eval_dataset.map(
__snake_case , batched=__snake_case , batch_size=-1 , keep_in_memory=__snake_case , remove_columns=eval_dataset.column_names , )
_UpperCamelCase = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
_UpperCamelCase = {v: k for k, v in enumerate(__snake_case )}
_UpperCamelCase = vocab_dict[''' ''']
del vocab_dict[" "]
_UpperCamelCase = len(__snake_case )
_UpperCamelCase = len(__snake_case )
with open('''vocab.json''' , '''w''' ) as vocab_file:
json.dump(__snake_case , __snake_case )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = WavaVecaCTCTokenizer(
'''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
_UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0.0 , do_normalize=__snake_case , return_attention_mask=__snake_case )
_UpperCamelCase = WavaVecaProcessor(feature_extractor=__snake_case , tokenizer=__snake_case )
_UpperCamelCase = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
_UpperCamelCase = min(len(__snake_case ) , data_args.max_train_samples )
_UpperCamelCase = train_dataset.select(range(__snake_case ) )
if data_args.max_val_samples is not None:
_UpperCamelCase = eval_dataset.select(range(data_args.max_val_samples ) )
_UpperCamelCase = torchaudio.transforms.Resample(48000 , 16000 )
# Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(__snake_case ):
_UpperCamelCase , _UpperCamelCase = torchaudio.load(batch['''path'''] )
_UpperCamelCase = resampler(__snake_case ).squeeze().numpy()
_UpperCamelCase = 16000
_UpperCamelCase = batch['''text''']
return batch
_UpperCamelCase = train_dataset.map(
__snake_case , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
__snake_case , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(__snake_case ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
_UpperCamelCase = processor(
audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
batch.update(__snake_case )
return batch
_UpperCamelCase = train_dataset.map(
__snake_case , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
__snake_case , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , )
# Metric
_UpperCamelCase = datasets.load_metric('''wer''' )
def compute_metrics(__snake_case ):
_UpperCamelCase = pred.predictions
_UpperCamelCase = np.argmax(__snake_case , axis=-1 )
_UpperCamelCase = processor.tokenizer.pad_token_id
_UpperCamelCase = processor.batch_decode(__snake_case )
# we do not want to group tokens when computing the metrics
_UpperCamelCase = processor.batch_decode(pred.label_ids , group_tokens=__snake_case )
_UpperCamelCase = wer_metric.compute(predictions=__snake_case , references=__snake_case )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
_UpperCamelCase = DataCollatorCTCWithPadding(processor=__snake_case , padding=__snake_case )
# Initialize our Trainer
_UpperCamelCase = CTCTrainer(
model=__snake_case , data_collator=__snake_case , args=__snake_case , compute_metrics=__snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCamelCase = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
_UpperCamelCase = model_args.model_name_or_path
else:
_UpperCamelCase = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
_UpperCamelCase = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model()
_UpperCamelCase = train_result.metrics
_UpperCamelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__snake_case )
)
_UpperCamelCase = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''train''' , __snake_case )
trainer.save_metrics('''train''' , __snake_case )
trainer.save_state()
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(__snake_case )
_UpperCamelCase = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''eval''' , __snake_case )
trainer.save_metrics('''eval''' , __snake_case )
return results
if __name__ == "__main__":
main()
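# Illustrative launch command for this script (the file name and all argument
# values below are placeholders, adjust to your setup):
#
#   python run_common_voice.py \
#       --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#       --dataset_config_name tr --output_dir ./wav2vec2-out \
#       --do_train --do_eval --fp16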
| 10 | 1 |
import math
def _snake_case ( __snake_case ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _snake_case ( __snake_case = 10001 ):
try:
_UpperCamelCase = int(__snake_case )
except (TypeError, ValueError):
raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
if nth <= 0:
raise ValueError('''Parameter nth must be greater than or equal to one.''' )
_UpperCamelCase = []
_UpperCamelCase = 2
while len(__snake_case ) < nth:
if is_prime(__snake_case ):
primes.append(__snake_case )
num += 1
else:
num += 1
return primes[len(__snake_case ) - 1]
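# Hand-checkable values for the function above: the 1st prime is 2, the 6th is
# 13, and the 10001st (the default) is 104743, the well-known Project Euler #7
# answer.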
if __name__ == "__main__":
print(f'{solution() = }')
| 10 | import math
class lowerCAmelCase_ :
def __init__( self : Tuple , _A : int=0 ): # a graph with Node 0,1,...,N-1
_UpperCamelCase = n
_UpperCamelCase = [
[math.inf for j in range(0 , _A )] for i in range(0 , _A )
] # adjacency matrix for weight
_UpperCamelCase = [
[math.inf for j in range(0 , _A )] for i in range(0 , _A )
] # dp[i][j] stores minimum distance from i to j
def UpperCamelCase_ ( self : Dict , _A : str , _A : List[str] , _A : Optional[Any] ):
_UpperCamelCase = w
def UpperCamelCase_ ( self : Optional[int] ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
_UpperCamelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCamelCase_ ( self : List[str] , _A : Optional[int] , _A : Optional[int] ):
return self.dp[u][v]
if __name__ == "__main__":
_lowerCAmelCase = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
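# Hand-checked expectations for the demo above: the shortest 1 -> 4 path is
# 1 -> 3 -> 4 with cost 5 + 6 = 11, and the shortest 0 -> 3 path is 0 -> 2 -> 3
# with cost 9 + 7 = 16; show_min returns the corresponding dp entries.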
| 10 | 1 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    if len(string_aa ) != 32:
        raise ValueError('''Input must be of length 32''' )
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError('''Input must be non-negative''' )
    hex_rep = format(i , '''08x''' )[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    bit_string = b""
    for char in message:
        bit_string += format(char , '''08b''' ).encode('''utf-8''' )
    start_len = format(len(bit_string ) , '''064b''' ).encode('''utf-8''' )
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string ) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    if len(bit_string ) % 512 != 0:
        raise ValueError('''Input must have length that\'s a multiple of 512''' )
    for pos in range(0 , len(bit_string ) , 512 ):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0 , 512 , 32 ):
            block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
        yield block_words
def not_aa(i: int) -> int:
    if i < 0:
        raise ValueError('''Input must be non-negative''' )
    i_str = format(i , '''032b''' )
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )
def sum_aa(a: int , b: int) -> int:
    return (a + b) % 2**32
def left_rotate_aa(i: int , shift: int) -> int:
    if i < 0:
        raise ValueError('''Input must be non-negative''' )
    if shift < 0:
        raise ValueError('''Shift must be non-negative''' )
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    bit_string = preprocess(message )
    added_consts = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
    # Starting states
    aa = 0x6745_2301
    ba = 0xEFCD_AB89
    ca = 0x98BA_DCFE
    da = 0x1032_5476
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]  # per-round left-rotation amounts, 16 per round
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string ):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d ))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b , left_rotate_aa(f , shift_amounts[i] ) )
        # Add hashed chunk to running total
        aa = sum_aa(aa , a )
        ba = sum_aa(ba , b )
        ca = sum_aa(ca , c )
        da = sum_aa(da , d )
    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
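# The two 32-bit helpers above, restated for clarity (illustrative names).
# The `^` in the rotation is equivalent to `|` here because the shifted-out
# and shifted-in bits never overlap.
def left_rotate_32_ref(value: int, shift: int) -> int:
    return ((value << shift) ^ (value >> (32 - shift))) % 2**32
def add_32_ref(a: int, b: int) -> int:
    return (a + b) % 2**32  # addition modulo 2**32, used throughout MD5
assert left_rotate_32_ref(0x8000_0000, 1) == 1
assert add_32_ref(0xFFFF_FFFF, 1) == 0
# The full digest can be sanity-checked against hashlib.md5(msg).hexdigest().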
| 10 | import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
def _snake_case ( __snake_case=None , __snake_case=None ):
return field(default_factory=lambda: default , metadata=__snake_case )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = list_field(
default=[], metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
}, )
UpperCAmelCase = list_field(
default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
UpperCAmelCase = list_field(
default=[8, 32, 128, 512], metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Use FP16 to accelerate inference."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Benchmark training of model"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Verbose memory tracing"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
}, )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Trace memory line by line"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Save result to a CSV file"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Save all print statements in a log file"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to print environment information"} )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
}, )
UpperCAmelCase = field(
default=F"""inference_time_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving time results to csv."}, )
UpperCAmelCase = field(
default=F"""inference_memory_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving memory results to csv."}, )
UpperCAmelCase = field(
default=F"""train_time_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving time results to csv for training."}, )
UpperCAmelCase = field(
default=F"""train_memory_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving memory results to csv for training."}, )
UpperCAmelCase = field(
default=F"""env_info_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving environment information."}, )
UpperCAmelCase = field(
default=F"""log_{round(time() )}.csv""", metadata={"help": "Log filename used if print statements are saved in log."}, )
UpperCAmelCase = field(default=3, metadata={"help": "Times an experiment will be run."} )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
}, )
def UpperCamelCase_ ( self : Union[str, Any] ):
warnings.warn(
F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
''' to benchmark Transformer models.''' , _A , )
def UpperCamelCase_ ( self : str ):
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def UpperCamelCase_ ( self : List[Any] ):
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def UpperCamelCase_ ( self : Optional[int] ):
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
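# A minimal sketch of the `list_field` pattern used above: dataclasses forbid
# mutable defaults directly, so a default_factory is supplied. Note the lambda
# returns the *same* list object on every call; use `lambda: list(default)`
# if per-instance copies are needed. Names below are illustrative.
def _list_field_demo(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class _BenchArgsDemo:
    batch_sizes: List[int] = _list_field_demo(default=[8], metadata={"help": "batch sizes"})
assert _BenchArgsDemo().batch_sizes == [8]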
| 10 | 1 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left, right, array, target):
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array, target):
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left, right, array, target):
    if left < right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    resulta = ite_ternary_search(collection, target)
    resultb = rec_ternary_search(0, len(collection) - 1, collection, target)
    if resulta != -1:
        print(f'Iterative search: {target} found at positions: {resulta}')
        print(f'Recursive search: {target} found at positions: {resultb}')
else:
print("Not found")
| 10 | import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _snake_case ( *__snake_case , __snake_case = None , __snake_case=True , __snake_case=2 ):
from .. import __version__
_UpperCamelCase = take_from
_UpperCamelCase = ()
if not isinstance(args[0] , __snake_case ):
_UpperCamelCase = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(__snake_case ).base_version ) >= version.parse(__snake_case ):
raise ValueError(
f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
f""" version {__version__} is >= {version_name}""" )
_UpperCamelCase = None
if isinstance(__snake_case , __snake_case ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(__snake_case ),)
_UpperCamelCase = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
elif hasattr(__snake_case , __snake_case ):
values += (getattr(__snake_case , __snake_case ),)
_UpperCamelCase = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
elif deprecated_kwargs is None:
_UpperCamelCase = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
if warning is not None:
_UpperCamelCase = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , __snake_case , stacklevel=__snake_case )
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) > 0:
_UpperCamelCase = inspect.getouterframes(inspect.currentframe() )[1]
_UpperCamelCase = call_frame.filename
_UpperCamelCase = call_frame.lineno
_UpperCamelCase = call_frame.function
_UpperCamelCase , _UpperCamelCase = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
if len(__snake_case ) == 0:
return
elif len(__snake_case ) == 1:
return values[0]
return values
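# A minimal standalone sketch of the deprecation pattern implemented above —
# warn, pop the old kwarg, return its value. Names and the version string are
# illustrative, and this omits the helper's version checks and frame
# inspection.
def _pop_deprecated(kwargs: dict, name: str, removed_in: str):
    if name not in kwargs:
        return None
    warnings.warn(
        f"`{name}` is deprecated and will be removed in version {removed_in}.",
        FutureWarning,
        stacklevel=2,
    )
    return kwargs.pop(name)
def _resize_demo(**kwargs):
    return _pop_deprecated(kwargs, "width", "0.30.0") or 256
assert _resize_demo(width=512) == 512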
| 10 | 1 |
from sklearn.metrics import matthews_corrcoef
import datasets
_lowerCAmelCase = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
_lowerCAmelCase = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
_lowerCAmelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
def UpperCamelCase_ ( self : Dict ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
def UpperCamelCase_ ( self : List[str] , _A : List[str] , _A : str , _A : str=None ):
return {
"matthews_correlation": float(matthews_corrcoef(_A , _A , sample_weight=_A ) ),
}
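# The metric above delegates to sklearn's matthews_corrcoef; calling it
# directly reproduces the docstring numbers:
_refs = [1, 3, 2, 0, 3, 2]
_preds = [1, 2, 2, 0, 3, 3]
assert round(matthews_corrcoef(_refs, _preds), 2) == 0.54
assert round(matthews_corrcoef(_refs, _preds, sample_weight=[0.5, 3, 1, 1, 1, 2]), 2) == 0.1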
| 10 | import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowerCAmelCase = logging.getLogger(__name__)
def _snake_case ( __snake_case , __snake_case ):
return (preds == labels).mean()
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128, metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed
set_seed(training_args.seed )
try:
_UpperCamelCase = processors[data_args.task_name]()
_UpperCamelCase = processor.get_labels()
_UpperCamelCase = len(__snake_case )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__snake_case , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
_UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCamelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , )
# Get datasets
_UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__snake_case , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__snake_case , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(__snake_case ) -> Dict:
_UpperCamelCase = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(__snake_case , p.label_ids )}
# Data collator
_UpperCamelCase = DataCollatorWithPadding(__snake_case , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_UpperCamelCase = Trainer(
model=__snake_case , args=__snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , compute_metrics=__snake_case , data_collator=__snake_case , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_master():
with open(__snake_case , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , __snake_case , __snake_case )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__snake_case )
return results
def _snake_case ( __snake_case ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
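# The accuracy used by `compute_metrics` above is plain mean agreement; a
# tiny numpy check on illustrative data:
_p, _l = np.array([0, 1, 1, 2]), np.array([0, 1, 2, 2])
assert (_p == _l).mean() == 0.75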
| 10 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def _snake_case ( __snake_case , __snake_case=False ):
_UpperCamelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_UpperCamelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def _snake_case ( __snake_case , __snake_case , __snake_case=False ):
for i in range(config.num_hidden_layers ):
if base_model:
_UpperCamelCase = ''''''
else:
_UpperCamelCase = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCamelCase = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
_UpperCamelCase = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCamelCase = in_proj_weight[
: config.hidden_size, :
]
_UpperCamelCase = in_proj_bias[: config.hidden_size]
_UpperCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCamelCase = in_proj_weight[
-config.hidden_size :, :
]
_UpperCamelCase = in_proj_bias[-config.hidden_size :]
def _snake_case ( __snake_case ):
_UpperCamelCase = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__snake_case , __snake_case )
def _snake_case ( __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = dct.pop(__snake_case )
_UpperCamelCase = val
def _snake_case ( ):
_UpperCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_UpperCamelCase = Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
return im
@torch.no_grad()
def _snake_case ( __snake_case , __snake_case , __snake_case=True ):
_UpperCamelCase = ViTConfig()
# patch_size
if model_name[-1] == "8":
_UpperCamelCase = 8
# set labels if required
if not base_model:
_UpperCamelCase = 1000
_UpperCamelCase = '''huggingface/label-files'''
_UpperCamelCase = '''imagenet-1k-id2label.json'''
_UpperCamelCase = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='''dataset''' ) , '''r''' ) )
_UpperCamelCase = {int(__snake_case ): v for k, v in idalabel.items()}
_UpperCamelCase = idalabel
_UpperCamelCase = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_UpperCamelCase = 384
_UpperCamelCase = 1536
_UpperCamelCase = 12
_UpperCamelCase = 6
# load original model from torch hub
_UpperCamelCase = torch.hub.load('''facebookresearch/dino:main''' , __snake_case )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_UpperCamelCase = original_model.state_dict()
if base_model:
remove_classification_head_(__snake_case )
_UpperCamelCase = create_rename_keys(__snake_case , base_model=__snake_case )
for src, dest in rename_keys:
rename_key(__snake_case , __snake_case , __snake_case )
read_in_q_k_v(__snake_case , __snake_case , __snake_case )
# load HuggingFace model
if base_model:
_UpperCamelCase = ViTModel(__snake_case , add_pooling_layer=__snake_case ).eval()
else:
_UpperCamelCase = ViTForImageClassification(__snake_case ).eval()
model.load_state_dict(__snake_case )
# Check outputs on an image, prepared by ViTImageProcessor
_UpperCamelCase = ViTImageProcessor()
_UpperCamelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
_UpperCamelCase = encoding['''pixel_values''']
_UpperCamelCase = model(__snake_case )
if base_model:
_UpperCamelCase = original_model(__snake_case )
assert torch.allclose(__snake_case , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
_UpperCamelCase = original_model(__snake_case )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__snake_case , outputs.logits , atol=1E-3 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__snake_case )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="dino_vitb16",
type=str,
help="Name of the model trained with DINO you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--base_model",
action="store_true",
help="Whether to only convert the base model (no projection head weights).",
)
parser.set_defaults(base_model=True)
_lowerCAmelCase = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
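# The q/k/v handling above slices timm's fused in_proj matrix into separate
# query/key/value weights; a shape-level sketch with an illustrative hidden
# size:
_hidden = 4
_in_proj = torch.randn(3 * _hidden, _hidden)  # fused qkv, rows stacked q|k|v
_q = _in_proj[: _hidden, :]
_k = _in_proj[_hidden : 2 * _hidden, :]
_v = _in_proj[-_hidden :, :]
assert _q.shape == _k.shape == _v.shape == (_hidden, _hidden)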
| 10 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "trocr"
UpperCAmelCase = ["past_key_values"]
UpperCAmelCase = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
def __init__( self : List[str] , _A : Optional[Any]=5_0265 , _A : Optional[Any]=1024 , _A : Optional[Any]=12 , _A : Any=16 , _A : Any=4096 , _A : Optional[Any]="gelu" , _A : Union[str, Any]=512 , _A : Dict=0.1 , _A : List[str]=0.0 , _A : Optional[Any]=0.0 , _A : Union[str, Any]=2 , _A : Any=0.02 , _A : List[str]=0.0 , _A : List[str]=True , _A : str=False , _A : List[str]=True , _A : Optional[Any]=True , _A : Optional[int]=1 , _A : int=0 , _A : Any=2 , **_A : Optional[int] , ):
_UpperCamelCase = vocab_size
_UpperCamelCase = d_model
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = activation_function
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = init_std
_UpperCamelCase = decoder_layerdrop
_UpperCamelCase = use_cache
_UpperCamelCase = scale_embedding
_UpperCamelCase = use_learned_position_embeddings
_UpperCamelCase = layernorm_embedding
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , decoder_start_token_id=_A , **_A , )
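# A minimal standalone sketch of the `attribute_map` mechanism relied on
# above — standard names like `hidden_size` are routed to the model-specific
# field `d_model`. This is much simpler than PretrainedConfig's real
# implementation and is for illustration only.
class _MappedConfigDemo:
    attribute_map = {"hidden_size": "d_model", "num_hidden_layers": "decoder_layers"}
    def __init__(self, d_model=1024, decoder_layers=12):
        self.d_model = d_model
        self.decoder_layers = decoder_layers
    def __getattr__(self, key):
        # Invoked only when normal lookup fails, i.e. for mapped names.
        if key in self.attribute_map:
            return getattr(self, self.attribute_map[key])
        raise AttributeError(key)
assert _MappedConfigDemo().hidden_size == 1024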
| 10 | 1 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int ):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float , y: float ) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2) )
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
        for _ in range(iterations ) )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"""The estimated value of pi is {pi_estimate}""" )
    print(f"""The numpy value of pi is {pi}""" )
    print(f"""The total error is {abs(pi - pi_estimate )}""" )
def area_under_curve_estimator(iterations: int , function_to_integrate: Callable[[float], float] , min_value: float = 0.0 , max_value: float = 1.0 , ) -> float:
    return mean(
        function_to_integrate(uniform(min_value , max_value ) ) for _ in range(iterations ) ) * (max_value - min_value)
def area_under_line_estimator_check(iterations: int , min_value: float = 0.0 , max_value: float = 1.0 ):
    def identity_function(x: float ) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations , identity_function , min_value , max_value )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('''******************''' )
    print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
    print(f"""Estimated value is {estimated_value}""" )
    print(f"""Expected value is {expected_value}""" )
    print(f"""Total error is {abs(estimated_value - expected_value )}""" )
    print('''******************''' )
def pi_estimator_using_area_under_curve(iterations: int ):
    def function_to_integrate(x: float ) -> float:
        return sqrt(4.0 - x * x )
    estimated_value = area_under_curve_estimator(
        iterations , function_to_integrate , 0.0 , 2.0 )
    print('''******************''' )
    print('''Estimating pi using area_under_curve_estimator''' )
    print(f"""Estimated value is {estimated_value}""" )
    print(f"""Expected value is {pi}""" )
    print(f"""Total error is {abs(estimated_value - pi )}""" )
    print('''******************''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
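# The core of the pi estimator above in a few lines — the fraction of random
# points inside the unit circle approaches pi/4 (illustrative name):
def _estimate_pi_ref(samples: int) -> float:
    hits = sum(uniform(-1.0, 1.0) ** 2 + uniform(-1.0, 1.0) ** 2 <= 1 for _ in range(samples))
    return 4 * hits / samples
print(_estimate_pi_ref(100_000))  # typically within ~0.01 of pi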
| 10 | import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : Union[str, Any] , _A : Optional[Any] , _A : Any=13 , _A : Union[str, Any]=7 , _A : List[str]=True , _A : List[str]=True , _A : List[str]=True , _A : List[str]=True , _A : List[Any]=True , _A : Optional[int]=False , _A : Any=False , _A : int=False , _A : Optional[Any]=2 , _A : Any=99 , _A : str=0 , _A : Union[str, Any]=32 , _A : List[Any]=5 , _A : Tuple=4 , _A : List[str]=0.1 , _A : Union[str, Any]=0.1 , _A : int=512 , _A : Union[str, Any]=12 , _A : List[str]=2 , _A : int=0.02 , _A : Optional[Any]=3 , _A : Any=4 , _A : Optional[int]="last" , _A : Any=None , _A : Dict=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_lengths
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = gelu_activation
_UpperCamelCase = sinusoidal_embeddings
_UpperCamelCase = causal
_UpperCamelCase = asm
_UpperCamelCase = n_langs
_UpperCamelCase = vocab_size
_UpperCamelCase = n_special
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = summary_type
_UpperCamelCase = use_proj
_UpperCamelCase = scope
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_input_lengths:
_UpperCamelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , 2 ).float()
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase_ ( self : str ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCamelCase_ ( self : str , _A : Union[str, Any] , _A : Optional[Any] , _A : str , _A : Tuple , _A : List[str] , _A : List[Any] , _A : Any , _A : str , _A : Optional[int] , ):
_UpperCamelCase = FlaubertModel(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , lengths=_A , langs=_A )
_UpperCamelCase = model(_A , langs=_A )
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple , _A : List[Any] , _A : str , _A : Optional[int] , _A : Optional[Any] , _A : List[str] , _A : int , _A : str , _A : List[Any] , _A : Any , ):
_UpperCamelCase = FlaubertWithLMHeadModel(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Tuple , _A : List[str] , _A : List[str] , _A : Optional[Any] , _A : Union[str, Any] , _A : str , _A : List[str] , _A : Tuple , _A : Optional[int] , _A : Dict , ):
_UpperCamelCase = FlaubertForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A , start_positions=_A , end_positions=_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : Tuple , _A : str , _A : Tuple , _A : Tuple , _A : Union[str, Any] , _A : List[str] , _A : int , _A : str , _A : Dict , _A : List[Any] , ):
_UpperCamelCase = FlaubertForQuestionAnswering(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
_UpperCamelCase = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
((_UpperCamelCase) , ) = result_with_labels.to_tuple()
_UpperCamelCase = model(_A , start_positions=_A , end_positions=_A )
((_UpperCamelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCamelCase_ ( self : List[Any] , _A : Union[str, Any] , _A : Tuple , _A : str , _A : int , _A : int , _A : Optional[int] , _A : Optional[int] , _A : int , _A : List[str] , ):
_UpperCamelCase = FlaubertForSequenceClassification(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self : Optional[int] , _A : List[str] , _A : Optional[Any] , _A : str , _A : Union[str, Any] , _A : List[Any] , _A : int , _A : List[Any] , _A : str , _A : List[str] , ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = FlaubertForTokenClassification(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Tuple , _A : Dict , _A : str , _A : Optional[Any] , _A : List[str] , _A : Any , _A : Optional[int] , _A : Optional[Any] , _A : List[Any] , _A : List[str] , ):
_UpperCamelCase = self.num_choices
_UpperCamelCase = FlaubertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self : Tuple ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
_UpperCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase_ ( self : Union[str, Any] , _A : Dict , _A : Dict , _A : Tuple , _A : int , _A : Any ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCamelCase_ ( self : str , _A : Any , _A : List[str] , _A : Optional[int]=False ):
_UpperCamelCase = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = FlaubertModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , emb_dim=37 )
def UpperCamelCase_ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_A )
@slow
def UpperCamelCase_ ( self : str ):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = FlaubertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@slow
@require_torch_gpu
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
_UpperCamelCase = True
_UpperCamelCase = model_class(config=_A )
_UpperCamelCase = self._prepare_for_class(_A , _A )
_UpperCamelCase = torch.jit.trace(
_A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_A , os.path.join(_A , '''traced_model.pt''' ) )
_UpperCamelCase = torch.jit.load(os.path.join(_A , '''traced_model.pt''' ) , map_location=_A )
loaded(inputs_dict['''input_ids'''].to(_A ) , inputs_dict['''attention_mask'''].to(_A ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
_UpperCamelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
_UpperCamelCase = model(_A )[0]
_UpperCamelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _A )
_UpperCamelCase = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _A , atol=1e-4 ) )
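# A minimal sketch of the jit-trace round trip exercised in the GPU test
# above, on a toy module (assumes torch is available; the real test traces
# FlauBERT with input_ids and attention_mask):
class _ToyModule(torch.nn.Module):
    def forward(self, x):
        return x * 2
_traced = torch.jit.trace(_ToyModule(), (torch.ones(2),))
with tempfile.TemporaryDirectory() as _tmp:
    _path = os.path.join(_tmp, "traced_model.pt")
    torch.jit.save(_traced, _path)
    _loaded = torch.jit.load(_path)
assert torch.equal(_loaded(torch.ones(2)), torch.full((2,), 2.0))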
| 10 | 1 |
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__(self ):
        self.connections = {}
    def add_node(self , node: str ):
        self.connections[node] = {}
    def add_transition_probability(self , nodea: str , nodeb: str , probability: float ):
        if nodea not in self.connections:
            self.add_node(nodea )
        if nodeb not in self.connections:
            self.add_node(nodeb )
        self.connections[nodea][nodeb] = probability
    def get_nodes(self ):
        return list(self.connections )
    def transition(self , node: str ):
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start: str , transitions: list[tuple[str, str, float]] , steps: int ):
    graph = MarkovChainGraphUndirectedUnweighted()
    for nodea, nodeb, probability in transitions:
        graph.add_transition_probability(nodea , nodeb , probability )
    visited = Counter(graph.get_nodes() )
    node = start
    for _ in range(steps ):
        node = graph.transition(node )
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
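# A usage sketch for the chain above (illustrative transition probabilities;
# each node's outgoing probabilities sum to 1):
_transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
print(get_transitions("a", _transitions, 10_000))  # "a" should dominate the counts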
| 10 | from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
def __init__( self : Any , _A : int , _A : int=12 , _A : int=7 , _A : Tuple=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : str=99 , _A : str=32 , _A : int=32 , _A : Optional[Any]=2 , _A : Dict=4 , _A : int=37 , _A : List[Any]=0.1 , _A : str=0.1 , _A : Any=512 , _A : int=0.02 , _A : Optional[Any]=0 , _A : Dict=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = projection_dim
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
_UpperCamelCase = bos_token_id
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
_UpperCamelCase = input_mask.numpy()
_UpperCamelCase , _UpperCamelCase = input_mask.shape
_UpperCamelCase = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_A ):
_UpperCamelCase = 1
_UpperCamelCase = 0
_UpperCamelCase = self.get_config()
return config, input_ids, tf.convert_to_tensor(_A )
def UpperCamelCase_ ( self : str ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def UpperCamelCase_ ( self : List[str] , _A : Tuple , _A : str , _A : Optional[Any] ):
_UpperCamelCase = TFBlipTextModel(config=_A )
_UpperCamelCase = model(_A , attention_mask=_A , training=_A )
_UpperCamelCase = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = (TFBlipTextModel,) if is_tf_available() else ()
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = BlipTextModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCamelCase_ ( self : Dict ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCamelCase_ ( self : List[Any] ):
pass
def UpperCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : List[str] ):
pass
@slow
def UpperCamelCase_ ( self : Optional[int] ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFBlipTextModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def UpperCamelCase_ ( self : int , _A : Optional[int]=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=_A )
| 10 | 1 |
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = "T5Config"
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "mt5"
UpperCAmelCase = MTaConfig
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "mt5"
UpperCAmelCase = MTaConfig
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "mt5"
UpperCAmelCase = MTaConfig
| 10 | from __future__ import annotations
seive = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if seive[i]:
        for j in range(i * i, 1_000_001, i):
            seive[j] = False
    i += 1
def is_prime(n):
    return seive[n]
def contains_an_even_digit(num):
    return any(digit in '''02468''' for digit in str(num ) )
def find_circular_primes(limit=1000000):
    result = [2]  # result already includes the number 2.
    for num in range(3 , limit + 1 , 2 ):
        if is_prime(num ) and not contains_an_even_digit(num ):
            str_num = str(num )
            list_nums = [int(str_num[j:] + str_num[:j] ) for j in range(len(str_num ) )]
            if all(is_prime(i ) for i in list_nums ):
                result.append(num )
    return result
def solution():
    return len(find_circular_primes() )
if __name__ == "__main__":
print(f'{len(find_circular_primes()) = }')
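# The rotation trick above, shown on one example — 197 is a circular prime
# because every rotation (197, 971, 719) is prime:
_s = "197"
assert [int(_s[j:] + _s[:j]) for j in range(len(_s))] == [197, 971, 719]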
| 10 | 1 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
_lowerCAmelCase = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
_lowerCAmelCase = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": f'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results',
"emoji": True,
},
}
]
_lowerCAmelCase = 0
for log in Path().glob("*.log"):
_lowerCAmelCase = 0
with open(log, "r") as f:
for line in f:
_lowerCAmelCase = json.loads(line)
if line.get("nodeid", "") != "":
_lowerCAmelCase = line["nodeid"]
if line.get("duration", None) is not None:
_lowerCAmelCase = f'{line["duration"]:.4f}'
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
_lowerCAmelCase = []
log.unlink()
_lowerCAmelCase = ""
_lowerCAmelCase = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
_lowerCAmelCase = []
_lowerCAmelCase = {}
for test in failed_tests:
_lowerCAmelCase = test[0].split("::")
_lowerCAmelCase = data[0].split("/")[-1]
if data[0] not in filesafailed:
_lowerCAmelCase = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
_lowerCAmelCase = [test[0] for test in failed_table]
_lowerCAmelCase = list(set(files))
# Count number of instances in failed_tests
_lowerCAmelCase = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
_lowerCAmelCase = tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
_lowerCAmelCase = "Too many failed tests, please see the full report in the Action results."
_lowerCAmelCase = len(err) + 10
_lowerCAmelCase = message[: 3_000 - offset] + f'\n...\n```\n{err}'
print(f'### {message}')
else:
_lowerCAmelCase = "No failed tests! 🤗"
print(f'## {message}')
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
_lowerCAmelCase = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
_lowerCAmelCase = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
_lowerCAmelCase = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
payload.append(action_button)
_lowerCAmelCase = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}',
}
],
}
payload.append(date_report)
_lowerCAmelCase = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
_lowerCAmelCase = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
_lowerCAmelCase = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
_lowerCAmelCase = row[0]
else:
_lowerCAmelCase = ""
_lowerCAmelCase = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```',
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
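# A minimal sketch of the pipe-delimited table built above; tabulate's
# built-in "github" format is similar to the custom TableFormat (rows are
# illustrative):
_rows = [["tests/test_foo.py", 2], ["tests/test_bar.py", 1]]
print(tabulate(_rows, headers=["Test Location", "Num Failed"], tablefmt="github"))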
| 10 | import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = DebertaVaTokenizer
UpperCAmelCase = DebertaVaTokenizerFast
UpperCAmelCase = True
UpperCAmelCase = True
    def setUp(self ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Dict , _A : Union[str, Any] ):
_UpperCamelCase = '''this is a test'''
_UpperCamelCase = '''this is a test'''
return input_text, output_text
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(_A ) , 3_0001 )
def UpperCamelCase_ ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def UpperCamelCase_ ( self : List[str] ):
# fmt: off
_UpperCamelCase = ''' \tHeLLo!how \n Are yoU? '''
_UpperCamelCase = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase_ ( self : Optional[Any] ):
pass
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[Any] ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : int ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Tuple ):
# fmt: off
_UpperCamelCase = ''' \tHeLLo!how \n Are yoU? '''
_UpperCamelCase = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = tokenizer.encode(_A )
_UpperCamelCase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = '''This is a test'''
_UpperCamelCase = [13, 1, 4398, 25, 21, 1289]
_UpperCamelCase = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
_UpperCamelCase = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
_UpperCamelCase = DebertaVaTokenizer(_A , keep_accents=_A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , keep_accents=_A )
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
_UpperCamelCase = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = DebertaVaTokenizer(_A )
_UpperCamelCase = tokenizer.encode('''sequence builders''' )
_UpperCamelCase = tokenizer.encode('''multi-sequence build''' )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_A )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_A , _A )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _A )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _A , )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
# fmt: off
_UpperCamelCase = {'''input_ids''': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
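# A minimal sketch (not part of the test suite) of exercising the same fixture
# by hand; SAMPLE_VOCAB is the spiece.model path defined at the top of this file:
#
#   tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True)
#   tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  ")
#   # expected, per the lowercasing test above: ["▁hello", "!", "how", "▁are", "▁you", "?"]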
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class lowerCAmelCase_ ( __lowercase, __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = StableDiffusionLatentUpscalePipeline
UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
UpperCAmelCase = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCAmelCase = frozenset([] )
UpperCAmelCase = True
@property
    def dummy_image(self ):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
    def get_dummy_components(self ):
        torch.manual_seed(0 )
        model = UNetaDConditionModel(
            act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=None , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
                '''KDownBlock2D''',
                '''KCrossAttnDownBlock2D''',
                '''KCrossAttnDownBlock2D''',
                '''KCrossAttnDownBlock2D''',
            ) , in_channels=8 , mid_block_type=None , only_cross_attention=False , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
                '''DownEncoderBlock2D''',
                '''DownEncoderBlock2D''',
                '''DownEncoderBlock2D''',
                '''DownEncoderBlock2D''',
            ] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        scheduler = EulerDiscreteScheduler(prediction_type='''sample''' )
        text_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''quick_gelu''' , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
            '''unet''': model.eval(),
            '''vae''': vae.eval(),
            '''scheduler''': scheduler,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase_ ( self : str ):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 256, 256, 3) )
        expected_slice = np.array(
            [0.4722_2412, 0.4192_1633, 0.4471_7434, 0.4687_4192, 0.4258_8258, 0.4615_0726, 0.467_7534, 0.4558_3832, 0.4857_9055] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
    def test_attention_slicing_forward_pass(self ):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
    def test_cpu_offload_forward_pass(self ):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
    def test_dict_tuple_outputs_equivalent(self ):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    def test_inference_batch_single_identical(self ):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
    def test_pt_np_pil_outputs_equivalent(self ):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
    def test_save_load_local(self ):
        super().test_save_load_local(expected_max_difference=3e-3 )
    def test_save_load_optional_components(self ):
        super().test_save_load_optional_components(expected_max_difference=3e-3 )
def UpperCamelCase_ ( self : Union[str, Any] ):
        skip_schedulers = [
            '''DDIMScheduler''',
            '''DDPMScheduler''',
            '''PNDMScheduler''',
            '''HeunDiscreteScheduler''',
            '''EulerAncestralDiscreteScheduler''',
            '''KDPM2DiscreteScheduler''',
            '''KDPM2AncestralDiscreteScheduler''',
            '''DPMSolverSDEScheduler''',
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        inputs['''num_inference_steps'''] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            scheduler_cls = getattr(diffusers , scheduler_enum.name )
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config )
            output = pipe(**inputs )[0]
            outputs.append(output )
        assert check_same_shape(outputs )
@require_torch_gpu
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
    def tearDown(self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : Any ):
        generator = torch.manual_seed(33 )
        pipe = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.float16 )
        pipe.to('''cuda''' )
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.float16 )
        upscaler.to('''cuda''' )
        prompt = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
        low_res_latents = pipe(prompt , generator=generator , output_type='''latent''' ).images
        image = upscaler(
            prompt=prompt , image=low_res_latents , num_inference_steps=20 , guidance_scale=0 , generator=generator , output_type='''np''' , ).images[0]
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
assert np.abs((expected_image - image).mean() ) < 5e-2
def UpperCamelCase_ ( self : Optional[Any] ):
        generator = torch.manual_seed(33 )
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.float16 )
        upscaler.to('''cuda''' )
        prompt = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
        low_res_img = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
        image = upscaler(
            prompt=prompt , image=low_res_img , num_inference_steps=20 , guidance_scale=0 , generator=generator , output_type='''np''' , ).images[0]
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
assert np.abs((expected_image - image).max() ) < 5e-2
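# The two slow tests above show the intended two-stage flow. As a sketch
# (GPU-only; weights are fetched from the Hub, `prompt` is any text prompt):
#
#   pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16).to("cuda")
#   upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16).to("cuda")
#   low_res_latents = pipe(prompt, output_type="latent").images   # stay in latent space between the two stages
#   image = upscaler(prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0).images[0]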
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex

    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
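    # A non-interactive sketch of the same call (hypothetical 4-vertex graph; the
    # input format mirrors the stdin loop above: adjacency_list[u] holds [v, w] pairs).
    example_graph = defaultdict(list)
    for u, v, w in [(0, 1, 1), (1, 2, 2), (2, 3, 1), (3, 0, 2), (0, 2, 3)]:
        example_graph[u].append([v, w])
        example_graph[v].append([u, w])
    print(prisms_algorithm(example_graph))  # prints the 3 MST edges for 4 vertices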
from __future__ import annotations
import math
def is_prime(number):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums(n):
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
    return []
def solution():
    return compute_nums(1)[0]
if __name__ == "__main__":
print(f'{solution() = }')
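# For reference: the first two odd composites that violate Goldbach's other
# conjecture are 5777 and 5993, so `solution()` prints 5777 and
# `compute_nums(2)` returns [5777, 5993] (easy to re-check by running this module).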
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        # Check if the log should be performed on this process
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.")
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name, log_level=None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
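# Typical usage (sketch): the Accelerate state must be initialised first, e.g.
# by constructing `Accelerator()` or `PartialState()` in the training script.
#
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   logger = get_logger(__name__)
#   logger.info("printed once, on the main process only")
#   logger.info("printed by every process, in order", main_process_only=False, in_order=True)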
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
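# With the lazy structure above, `import transformers` stays cheap: the heavy
# torch/tf submodules are only imported when an attribute is first accessed,
# so e.g. `from transformers import RemBertConfig` resolves through _LazyModule
# without pulling in the modeling code.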
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = "▁"
_lowerCAmelCase = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = BertGenerationTokenizer
UpperCAmelCase = False
UpperCAmelCase = True
    def setUp(self ):
super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = '''<s>'''
_UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(_A ) , 1002 )
def UpperCamelCase_ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def UpperCamelCase_ ( self : int ):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
_UpperCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [285, 46, 10, 170, 382] , )
_UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_UpperCamelCase = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def UpperCamelCase_ ( self : Union[str, Any] ):
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''Hello World!'''
_UpperCamelCase = [1_8536, 2260, 101]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
_UpperCamelCase = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def UpperCamelCase_ ( self : Dict ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
_UpperCamelCase = list(self.big_tokenizer.get_vocab().keys() )[:10]
_UpperCamelCase = ''' '''.join(_A )
_UpperCamelCase = self.big_tokenizer.encode_plus(_A , return_tensors='''pt''' , return_token_type_ids=_A )
_UpperCamelCase = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_A )
_UpperCamelCase = BertGenerationConfig()
_UpperCamelCase = BertGenerationEncoder(_A )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_A )
model(**_A )
@slow
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = {'''input_ids''': [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
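# Sketch of the round trip the slow tests above rely on (downloads the
# pretrained sentencepiece model from the Hub):
#
#   tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
#   tok.encode("Hello World!")  # -> [18536, 2260, 101], as asserted above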
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCAmelCase_ ( __lowercase, __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = StableUnCLIPPipeline
UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
UpperCAmelCase = False
    def get_dummy_components(self ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # prior components
        torch.manual_seed(0 )
        prior_tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        torch.manual_seed(0 )
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
        torch.manual_seed(0 )
        prior = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=embedder_projection_dim , num_layers=1 , )
        torch.manual_seed(0 )
        prior_scheduler = DDPMScheduler(
            variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=True , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
        # regular denoising components
        torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
        image_noising_scheduler = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
        torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule='''scaled_linear''' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=False , steps_offset=1 , )
        torch.manual_seed(0 )
        vae = AutoencoderKL()
        components = {
            # prior components
            '''prior_tokenizer''': prior_tokenizer,
            '''prior_text_encoder''': prior_text_encoder,
            '''prior''': prior,
            '''prior_scheduler''': prior_scheduler,
            # image noising components
            '''image_normalizer''': image_normalizer,
            '''image_noising_scheduler''': image_noising_scheduler,
            # regular denoising components
            '''tokenizer''': tokenizer,
            '''text_encoder''': text_encoder,
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
    def test_attention_slicing_forward_pass(self ):
        test_max_difference = torch_device == '''cpu'''
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )
    def test_inference_batch_single_identical(self ):
        test_max_difference = torch_device in ['''cpu''', '''mps''']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
    def tearDown(self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
        pipe = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        output = pipe('''anime turle''' , generator=generator , output_type='''np''' )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
def UpperCamelCase_ ( self : Optional[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            '''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
        mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
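# Memory-saving pattern shared by both integration tests above, as a sketch:
#
#   pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
#   pipe.enable_attention_slicing()       # compute attention in chunks to cap peak memory
#   pipe.enable_sequential_cpu_offload()  # keep submodules on CPU until they are needed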
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
END_COMMON = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights, config_update):
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path):
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path, save_dir, config_update):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
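    # Example invocation (paths are placeholders; assumes this script is saved as
    # convert_bigbird_pegasus_tf_to_pytorch.py):
    #
    #   python convert_bigbird_pegasus_tf_to_pytorch.py \
    #       --tf_ckpt_path /path/to/bigbird_pegasus_tf_ckpt \
    #       --save_dir ./bigbird-pegasus-converted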
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a, input_b):
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset, value_array):
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a, input_b):
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
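# A small self-contained example of the API above (illustrative values):
# the nearest neighbour of [0, 0] in the dataset is [0, 1] at distance 1.0.
example_dataset = np.array([[0, 1], [2, 2], [5, 5]])
example_values = np.array([[0, 0]])
print(similarity_search(example_dataset, example_values))  # [[[0, 1], 1.0]]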
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:  # list-backed queue, used by the level-order traversal below
    def __init__(self):
        self.data = []
        self.head = 0
        self.tail = 0

    def is_empty(self):
        return self.head == self.tail

    def push(self, data):
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self):
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self):
        return self.tail - self.head

    def print_queue(self):
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
        self.height = 1

    def get_data(self):
        return self.data

    def get_left(self):
        return self.left

    def get_right(self):
        return self.right

    def get_height(self):
        return self.height

    def set_data(self, data):
        self.data = data

    def set_left(self, node):
        self.left = node

    def set_right(self, node):
        self.right = node

    def set_height(self, height):
        self.height = height
def get_height(node):
    if node is None:
        return 0
    return node.get_height()


def my_max(a, b):
    if a > b:
        return a
    return b


def left_rotation(node):
    print("left rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def right_rotation(node):
    print("right rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node):
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node):
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node, data):
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node
def get_right_most(root):
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root):
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root, data):
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    h = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(h)
    return root
class lowerCAmelCase_ :
def __init__( self : Any ):
_UpperCamelCase = None
def UpperCamelCase_ ( self : Dict ):
return get_height(self.root )
def UpperCamelCase_ ( self : Any , _A : Any ):
print('''insert:''' + str(_A ) )
_UpperCamelCase = insert_node(self.root , _A )
def UpperCamelCase_ ( self : Union[str, Any] , _A : Any ):
print('''delete:''' + str(_A ) )
if self.root is None:
print('''Tree is empty!''' )
return
_UpperCamelCase = del_node(self.root , _A )
    def __str__( self : Any , ): # a level-order traversal gives a more intuitive look at the tree
_UpperCamelCase = ''''''
_UpperCamelCase = MyQueue()
q.push(self.root )
_UpperCamelCase = self.get_height()
if layer == 0:
return output
_UpperCamelCase = 0
while not q.is_empty():
_UpperCamelCase = q.pop()
_UpperCamelCase = ''' ''' * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(_A )
q.push(_A )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
_UpperCamelCase = cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , _A ) - 1:
_UpperCamelCase = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def _snake_case ( ):
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
_lowerCAmelCase = AVLtree()
_lowerCAmelCase = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 10 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = ShapEPipeline
UpperCAmelCase = ["prompt"]
UpperCAmelCase = ["prompt"]
UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
UpperCAmelCase = False
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
return 32
@property
def UpperCamelCase_ ( self : int ):
return 32
@property
def UpperCamelCase_ ( self : List[str] ):
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return 8
@property
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCamelCase_ ( self : List[Any] ):
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCamelCase_ ( self : int ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
_UpperCamelCase = PriorTransformer(**_A )
return model
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
_UpperCamelCase = ShapERenderer(**_A )
return model
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.dummy_prior
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = self.dummy_tokenizer
_UpperCamelCase = self.dummy_renderer
_UpperCamelCase = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , )
_UpperCamelCase = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase_ ( self : Tuple , _A : Tuple , _A : Optional[int]=0 ):
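        # mps does not support device-local generators, so fall back to seeding the default RNG there.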
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = '''cpu'''
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = pipe(**self.get_dummy_inputs(_A ) )
_UpperCamelCase = output.images[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_UpperCamelCase = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self : Any ):
        # NOTE: Larger batch sizes cause this test to time out, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = torch_device == '''cpu'''
_UpperCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_A , relax_max_difference=_A , )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = 1
_UpperCamelCase = 2
_UpperCamelCase = self.get_dummy_inputs(_A )
for key in inputs.keys():
if key in self.batch_params:
_UpperCamelCase = batch_size * [inputs[key]]
_UpperCamelCase = pipe(**_A , num_images_per_prompt=_A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
_UpperCamelCase = ShapEPipeline.from_pretrained('''openai/shap-e''' )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(0 )
_UpperCamelCase = pipe(
'''a shark''' , generator=_A , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_A , _A )
| 10 | 1 |
from torch import nn
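# Return the torch.nn activation module matching the given name.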
def _snake_case ( __snake_case ):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f"""Unsupported activation function: {act_fn}""" )
| 10 | import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_lowerCAmelCase = HfApi()
_lowerCAmelCase = {}
# fmt: off
_lowerCAmelCase = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
_lowerCAmelCase = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
_lowerCAmelCase = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
_lowerCAmelCase = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
_lowerCAmelCase = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
_lowerCAmelCase = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
_lowerCAmelCase = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
_lowerCAmelCase = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
_lowerCAmelCase = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
_lowerCAmelCase = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
_lowerCAmelCase = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
_lowerCAmelCase = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
_lowerCAmelCase = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
_lowerCAmelCase = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
_lowerCAmelCase = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
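# Run a fixed noise batch through every matching diffusers checkpoint and compare
# the first row of the output logits against the reference slices collected above.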
_lowerCAmelCase = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_lowerCAmelCase = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f'Started running {mod.modelId}!!!')
if mod.modelId.startswith("CompVis"):
_lowerCAmelCase = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
_lowerCAmelCase = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_lowerCAmelCase = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_lowerCAmelCase = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_lowerCAmelCase = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1E-3
)
print(f'{mod.modelId} has passed successfully!!!')
| 10 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."}, )
UpperCAmelCase = field(
default=128, metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
_UpperCamelCase = import_module('''tasks''' )
try:
_UpperCamelCase = getattr(__snake_case , model_args.task_type )
_UpperCamelCase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed
set_seed(training_args.seed )
    # Prepare CoNLL-2003 task
_UpperCamelCase = token_classification_task.get_labels(data_args.labels )
_UpperCamelCase = dict(enumerate(__snake_case ) )
_UpperCamelCase = len(__snake_case )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__snake_case , idalabel=__snake_case , labelaid={label: i for i, label in enumerate(__snake_case )} , cache_dir=model_args.cache_dir , )
_UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
_UpperCamelCase = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , )
# Get datasets
_UpperCamelCase = (
TokenClassificationDataset(
token_classification_task=__snake_case , data_dir=data_args.data_dir , tokenizer=__snake_case , labels=__snake_case , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_UpperCamelCase = (
TokenClassificationDataset(
token_classification_task=__snake_case , data_dir=data_args.data_dir , tokenizer=__snake_case , labels=__snake_case , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(__snake_case , __snake_case ) -> Tuple[List[int], List[int]]:
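        # Map argmax predictions and gold label ids back to label names,
        # skipping positions set to the CrossEntropyLoss ignore index.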
_UpperCamelCase = np.argmax(__snake_case , axis=2 )
_UpperCamelCase , _UpperCamelCase = preds.shape
_UpperCamelCase = [[] for _ in range(__snake_case )]
_UpperCamelCase = [[] for _ in range(__snake_case )]
for i in range(__snake_case ):
for j in range(__snake_case ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(__snake_case ) -> Dict:
_UpperCamelCase , _UpperCamelCase = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(__snake_case , __snake_case ),
"precision": precision_score(__snake_case , __snake_case ),
"recall": recall_score(__snake_case , __snake_case ),
"f1": fa_score(__snake_case , __snake_case ),
}
# Data collator
_UpperCamelCase = DataCollatorWithPadding(__snake_case , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_UpperCamelCase = Trainer(
model=__snake_case , args=__snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , compute_metrics=__snake_case , data_collator=__snake_case , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_process_zero():
with open(__snake_case , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , __snake_case , __snake_case )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__snake_case )
# Predict
if training_args.do_predict:
_UpperCamelCase = TokenClassificationDataset(
token_classification_task=__snake_case , data_dir=data_args.data_dir , tokenizer=__snake_case , labels=__snake_case , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = trainer.predict(__snake_case )
_UpperCamelCase , _UpperCamelCase = align_predictions(__snake_case , __snake_case )
_UpperCamelCase = os.path.join(training_args.output_dir , '''test_results.txt''' )
if trainer.is_world_process_zero():
with open(__snake_case , '''w''' ) as writer:
for key, value in metrics.items():
logger.info(''' %s = %s''' , __snake_case , __snake_case )
writer.write('''%s = %s\n''' % (key, value) )
# Save predictions
_UpperCamelCase = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
if trainer.is_world_process_zero():
with open(__snake_case , '''w''' ) as writer:
with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
token_classification_task.write_predictions_to_file(__snake_case , __snake_case , __snake_case )
return results
def _snake_case ( __snake_case ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 10 | from typing import List
from .keymap import KEYMAP, get_character
def _snake_case ( __snake_case ):
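    # Record the key on the decorated function's ``handle_key`` list so the
    # handler class below can register it.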
def decorator(__snake_case ):
_UpperCamelCase = getattr(__snake_case , '''handle_key''' , [] )
handle += [key]
setattr(__snake_case , '''handle_key''' , __snake_case )
return func
return decorator
def _snake_case ( *__snake_case ):
def decorator(__snake_case ):
_UpperCamelCase = getattr(__snake_case , '''handle_key''' , [] )
handle += keys
setattr(__snake_case , '''handle_key''' , __snake_case )
return func
return decorator
class lowerCAmelCase_ ( __lowercase ):
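    # Metaclass-style class factory: __new__ collects every method tagged with
    # ``handle_key`` into a key -> handler map and attaches ``handle_input``.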
def __new__( cls : Optional[Any] , _A : Optional[Any] , _A : Optional[int] , _A : Union[str, Any] ):
_UpperCamelCase = super().__new__(cls , _A , _A , _A )
if not hasattr(_A , '''key_handler''' ):
setattr(_A , '''key_handler''' , {} )
setattr(_A , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
_UpperCamelCase = getattr(_A , '''handle_key''' , [] )
for key in handled_keys:
_UpperCamelCase = value
return new_cls
@staticmethod
def UpperCamelCase_ ( cls : str ):
_UpperCamelCase = get_character()
if char != KEYMAP["undefined"]:
_UpperCamelCase = ord(_A )
_UpperCamelCase = cls.key_handler.get(_A )
if handler:
_UpperCamelCase = char
return handler(cls )
else:
return None
def _snake_case ( cls ):
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 10 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class lowerCAmelCase_ ( unittest.TestCase ):
UpperCAmelCase = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
_UpperCamelCase = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
_A , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A , num_return_sequences=2 , return_tensors=_A )
self.assertEqual(
_A , [
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
] , )
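        # Give the tokenizer a pad token so the batched call below can stack sequences of different lengths.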
_UpperCamelCase = text_generator.model.config.eos_token_id
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=_A , num_return_sequences=2 , batch_size=2 , return_tensors=_A , )
self.assertEqual(
_A , [
[
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
],
[
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
],
] , )
@require_tf
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
_UpperCamelCase = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=_A )
self.assertEqual(
_A , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def UpperCamelCase_ ( self : int , _A : str , _A : Union[str, Any] , _A : Any ):
_UpperCamelCase = TextGenerationPipeline(model=_A , tokenizer=_A )
return text_generator, ["This is a test", "Another test"]
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = '''Hello I believe in'''
_UpperCamelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
_UpperCamelCase = text_generator(_A )
self.assertEqual(
_A , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
_UpperCamelCase = text_generator(_A , stop_sequence=''' fe''' )
self.assertEqual(_A , [{'''generated_text''': '''Hello I believe in fe'''}] )
def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : Union[str, Any] ):
_UpperCamelCase = text_generator.model
_UpperCamelCase = text_generator.tokenizer
_UpperCamelCase = text_generator('''This is a test''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_UpperCamelCase = text_generator('''This is a test''' , return_full_text=_A )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_UpperCamelCase = pipeline(task='''text-generation''' , model=_A , tokenizer=_A , return_full_text=_A )
_UpperCamelCase = text_generator('''This is a test''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_UpperCamelCase = text_generator('''This is a test''' , return_full_text=_A )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_UpperCamelCase = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=_A )
self.assertEqual(
_A , [
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
] , )
if text_generator.tokenizer.pad_token is not None:
_UpperCamelCase = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=_A )
self.assertEqual(
_A , [
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
] , )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_full_text=_A , return_text=_A )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_full_text=_A , return_tensors=_A )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_text=_A , return_tensors=_A )
        # An empty prompt is slightly special:
        # it requires a BOS token to exist.
        # Special case for Pegasus, which always appends EOS and therefore
        # works even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
_UpperCamelCase = text_generator('''''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
_UpperCamelCase = text_generator('''''' )
if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it is impossible
            # to control long generation with only max_length without
            # fancy calculation, so these tests are skipped for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
_UpperCamelCase = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
_UpperCamelCase = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(_A ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase_ ( self : Optional[int] ):
import torch
# Classic `model_kwargs`
_UpperCamelCase = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
        # Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else.)
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def UpperCamelCase_ ( self : Union[str, Any] ):
import torch
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase_ ( self : Optional[int] ):
import torch
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=_A , top_p=0.5 )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = '''Hello world'''
_UpperCamelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
_UpperCamelCase = logging.get_logger('''transformers.generation.tf_utils''' )
else:
_UpperCamelCase = logging.get_logger('''transformers.generation.utils''' )
        _UpperCamelCase = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_length=10 , max_new_tokens=1 )
self.assertIn(_A , cl.out )
# The user only sets one -> no warning
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_new_tokens=1 )
self.assertNotIn(_A , cl.out )
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_length=10 )
self.assertNotIn(_A , cl.out )
| 10 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "unispeech-sat"
def __init__( self : List[Any] , _A : Dict=32 , _A : int=768 , _A : str=12 , _A : str=12 , _A : Any=3072 , _A : List[str]="gelu" , _A : Any=0.1 , _A : Union[str, Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[Any]=0.0 , _A : List[str]=0.0 , _A : Optional[Any]=0.1 , _A : str=0.1 , _A : List[str]=0.02 , _A : Optional[int]=1e-5 , _A : Dict="group" , _A : str="gelu" , _A : List[str]=(512, 512, 512, 512, 512, 512, 512) , _A : Any=(5, 2, 2, 2, 2, 2, 2) , _A : Dict=(10, 3, 3, 3, 3, 2, 2) , _A : Union[str, Any]=False , _A : str=128 , _A : Tuple=16 , _A : Optional[int]=False , _A : Dict=True , _A : Optional[Any]=0.05 , _A : Any=10 , _A : str=2 , _A : Dict=0.0 , _A : List[str]=10 , _A : Union[str, Any]=0 , _A : List[str]=320 , _A : List[Any]=2 , _A : Optional[Any]=0.1 , _A : Optional[Any]=100 , _A : List[str]=256 , _A : Any=256 , _A : List[Any]=0.1 , _A : Dict="mean" , _A : Dict=False , _A : List[str]=False , _A : List[Any]=256 , _A : Any=(512, 512, 512, 512, 1500) , _A : Any=(5, 3, 3, 1, 1) , _A : Dict=(1, 2, 3, 1, 1) , _A : str=512 , _A : Dict=0 , _A : List[str]=1 , _A : Tuple=2 , _A : Optional[Any]=504 , **_A : int , ):
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A )
_UpperCamelCase = hidden_size
_UpperCamelCase = feat_extract_norm
_UpperCamelCase = feat_extract_activation
_UpperCamelCase = list(_A )
_UpperCamelCase = list(_A )
_UpperCamelCase = list(_A )
_UpperCamelCase = conv_bias
_UpperCamelCase = num_conv_pos_embeddings
_UpperCamelCase = num_conv_pos_embedding_groups
_UpperCamelCase = len(self.conv_dim )
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = feat_proj_dropout
_UpperCamelCase = final_dropout
_UpperCamelCase = layerdrop
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = initializer_range
_UpperCamelCase = vocab_size
_UpperCamelCase = num_clusters
_UpperCamelCase = do_stable_layer_norm
_UpperCamelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCamelCase = apply_spec_augment
_UpperCamelCase = mask_time_prob
_UpperCamelCase = mask_time_length
_UpperCamelCase = mask_time_min_masks
_UpperCamelCase = mask_feature_prob
_UpperCamelCase = mask_feature_length
_UpperCamelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_UpperCamelCase = num_codevectors_per_group
_UpperCamelCase = num_codevector_groups
_UpperCamelCase = contrastive_logits_temperature
_UpperCamelCase = feat_quantizer_dropout
_UpperCamelCase = num_negatives
_UpperCamelCase = codevector_dim
_UpperCamelCase = proj_codevector_dim
_UpperCamelCase = diversity_loss_weight
# ctc loss
_UpperCamelCase = ctc_loss_reduction
_UpperCamelCase = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_UpperCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_UpperCamelCase = list(_A )
_UpperCamelCase = list(_A )
_UpperCamelCase = list(_A )
_UpperCamelCase = xvector_output_dim
@property
def UpperCamelCase_ ( self : List[str] ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 10 | def _snake_case ( __snake_case = 100 ):
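    # Project Euler 6: difference between the square of the sum and the sum of the
    # squares of the first n natural numbers, using the closed forms
    # (n * (n + 1) / 2) ** 2 and n * (n + 1) * (2 * n + 1) / 6.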
_UpperCamelCase = (n * (n + 1) // 2) ** 2
_UpperCamelCase = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(f'{solution() = }')
| 10 | 1 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _snake_case ( __snake_case ):
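    # Möbius function mu(n): 0 when n is not square-free, otherwise
    # (-1) ** k where k is the number of distinct prime factors.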
_UpperCamelCase = prime_factors(__snake_case )
if is_square_free(__snake_case ):
return -1 if len(__snake_case ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_lowerCAmelCase = logging.get_logger(__name__)
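# Compute an output (height, width) close to the requested size, optionally
# preserving aspect ratio and constraining both dimensions to a given multiple.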
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ):
def constraint_to_multiple_of(__snake_case , __snake_case , __snake_case=0 , __snake_case=None ):
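        # Round val to the nearest multiple, then nudge the result back inside
        # [min_val, max_val] when a bound would otherwise be violated.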
_UpperCamelCase = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_UpperCamelCase = math.floor(val / multiple ) * multiple
if x < min_val:
_UpperCamelCase = math.ceil(val / multiple ) * multiple
return x
_UpperCamelCase = (output_size, output_size) if isinstance(__snake_case , __snake_case ) else output_size
_UpperCamelCase , _UpperCamelCase = get_image_size(__snake_case )
_UpperCamelCase , _UpperCamelCase = output_size
# determine new height and width
_UpperCamelCase = output_height / input_height
_UpperCamelCase = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_UpperCamelCase = scale_width
else:
# fit height
_UpperCamelCase = scale_height
_UpperCamelCase = constraint_to_multiple_of(scale_height * input_height , multiple=__snake_case )
_UpperCamelCase = constraint_to_multiple_of(scale_width * input_width , multiple=__snake_case )
return (new_height, new_width)
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = ["pixel_values"]
def __init__( self : List[Any] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = False , _A : int = 1 , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : List[str] , ):
super().__init__(**_A )
_UpperCamelCase = size if size is not None else {'''height''': 384, '''width''': 384}
_UpperCamelCase = get_size_dict(_A )
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = keep_aspect_ratio
_UpperCamelCase = ensure_multiple_of
_UpperCamelCase = resample
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase_ ( self : List[str] , _A : np.ndarray , _A : Dict[str, int] , _A : bool = False , _A : int = 1 , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
_UpperCamelCase = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_UpperCamelCase = get_resize_output_image_size(
_A , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_A , multiple=_A , )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : str , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
return rescale(_A , scale=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : int , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : Optional[int] , _A : ImageInput , _A : bool = None , _A : int = None , _A : bool = None , _A : int = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ):
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(_A )
_UpperCamelCase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_UpperCamelCase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(_A ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(_A , _A ) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
def UpperCamelCase_ ( self : Any , _A : Any , _A : List[Tuple] = None ):
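        # Convert model logits into one semantic segmentation map per image,
        # optionally upsampled to the requested target sizes.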
_UpperCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_A ) != len(_A ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_A ):
_UpperCamelCase = target_sizes.numpy()
_UpperCamelCase = []
for idx in range(len(_A ) ):
_UpperCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_A )
_UpperCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_A )
else:
_UpperCamelCase = logits.argmax(dim=1 )
_UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 10 | 1 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = DebertaVaTokenizer
UpperCAmelCase = DebertaVaTokenizerFast
UpperCAmelCase = True
UpperCAmelCase = True
def UpperCamelCase_ ( self : List[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase = DebertaVaTokenizer(_A , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Dict , _A : Union[str, Any] ):
_UpperCamelCase = '''this is a test'''
_UpperCamelCase = '''this is a test'''
return input_text, output_text
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(_A ) , 3_0001 )
def UpperCamelCase_ ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def UpperCamelCase_ ( self : List[str] ):
# fmt: off
_UpperCamelCase = ''' \tHeLLo!how \n Are yoU? '''
_UpperCamelCase = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase_ ( self : Optional[Any] ):
pass
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[Any] ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : int ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Tuple ):
# fmt: off
_UpperCamelCase = ''' \tHeLLo!how \n Are yoU? '''
_UpperCamelCase = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = tokenizer.encode(_A )
_UpperCamelCase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = '''This is a test'''
_UpperCamelCase = [13, 1, 4398, 25, 21, 1289]
_UpperCamelCase = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
_UpperCamelCase = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
_UpperCamelCase = DebertaVaTokenizer(_A , keep_accents=_A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , keep_accents=_A )
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
_UpperCamelCase = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = DebertaVaTokenizer(_A )
_UpperCamelCase = tokenizer.encode('''sequence builders''' )
_UpperCamelCase = tokenizer.encode('''multi-sequence build''' )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_A )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_A , _A )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _A )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _A , )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
# fmt: off
_UpperCamelCase = {'''input_ids''': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
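# A toy sketch of the special-token layout asserted in the sequence-builder
# test above: [CLS] x [SEP] for a single sequence and [CLS] x [SEP] y [SEP]
# for a pair. The ids below are invented; only the layout matters.
_demo_cls, _demo_sep = 1, 2
_demo_text, _demo_pair = [10, 11], [20, 21]
print([_demo_cls] + _demo_text + [_demo_sep])  # [1, 10, 11, 2]
print([_demo_cls] + _demo_text + [_demo_sep] + _demo_pair + [_demo_sep])  # [1, 10, 11, 2, 20, 21, 2]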
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCAmelCase = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
_UpperCamelCase = self.diffusers_dir
shutil.copy(
os.path.join(_A , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def UpperCamelCase_ ( self : str , _A : List[str] , _A : Optional[Any] , _A : List[str] , _A : Optional[int]=None ):
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        _UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 )
_UpperCamelCase = black.format_str(_A , mode=_A )
_UpperCamelCase = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(_A , '''w''' , newline='''\n''' ) as f:
f.write(_A )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_A ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_A )
with open(_A , '''r''' ) as f:
self.assertTrue(f.read() , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , _A , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , _A ) , )
# Copy consistency with a really long name
_UpperCamelCase = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , _A , _A ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , _A , overwrite_result=re.sub('''DDPM''' , '''Test''' , _A ) , )
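# A standalone sketch of how a "# Copied from" marker is parsed by the
# checker exercised above: it extracts the source object path plus an
# optional rename pattern, then compares the annotated block against the
# (renamed) original.
import re as _demo_re

_demo_comment = "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test"
_demo_match = _demo_re.search(r"^#\s*Copied from\s+diffusers\.(\S+)\s*($|\S.*$)", _demo_comment)
print(_demo_match.groups())  # ('schedulers.scheduling_ddpm.DDPMSchedulerOutput', 'with DDPM->Test')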
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class lowerCAmelCase_ :
def __init__( self : Any , _A : int=None , **_A : Optional[int] ):
logger.info('''`diffusers.OnnxRuntimeModel` is experimental and might change in the future.''' )
_UpperCamelCase = model
_UpperCamelCase = kwargs.get('''model_save_dir''' , _A )
_UpperCamelCase = kwargs.get('''latest_model_name''' , _A )
def __call__( self : Union[str, Any] , **_A : Tuple ):
        _UpperCamelCase = {k: np.array(v) for k, v in _A.items()}
        return self.model.run(None , _UpperCamelCase )
@staticmethod
def UpperCamelCase_ ( _A : Union[str, Path] , _A : Dict=None , _A : Tuple=None ):
if provider is None:
logger.info('''No onnxruntime provider specified, using CPUExecutionProvider''' )
_UpperCamelCase = '''CPUExecutionProvider'''
return ort.InferenceSession(_A , providers=[provider] , sess_options=_A )
def UpperCamelCase_ ( self : Union[str, Any] , _A : Union[str, Path] , _A : Optional[str] = None , **_A : int ):
_UpperCamelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
_UpperCamelCase = self.model_save_dir.joinpath(self.latest_model_name )
_UpperCamelCase = Path(_A ).joinpath(_A )
try:
shutil.copyfile(_A , _A )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
_UpperCamelCase = self.model_save_dir.joinpath(_A )
if src_path.exists():
_UpperCamelCase = Path(_A ).joinpath(_A )
try:
shutil.copyfile(_A , _A )
except shutil.SameFileError:
pass
def UpperCamelCase_ ( self : Dict , _A : Union[str, os.PathLike] , **_A : Dict , ):
if os.path.isfile(_A ):
logger.error(F"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(_A , exist_ok=_A )
# saving model weights/files
self._save_pretrained(_A , **_A )
@classmethod
def UpperCamelCase_ ( cls : Any , _A : Union[str, Path] , _A : Optional[Union[bool, str, None]] = None , _A : Optional[Union[str, None]] = None , _A : bool = False , _A : Optional[str] = None , _A : Optional[str] = None , _A : Optional[str] = None , _A : Optional["ort.SessionOptions"] = None , **_A : List[Any] , ):
_UpperCamelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_A ):
_UpperCamelCase = OnnxRuntimeModel.load_model(
os.path.join(_A , _A ) , provider=_A , sess_options=_A )
_UpperCamelCase = Path(_A )
# load model from hub
else:
# download model
_UpperCamelCase = hf_hub_download(
repo_id=_A , filename=_A , use_auth_token=_A , revision=_A , cache_dir=_A , force_download=_A , )
_UpperCamelCase = Path(_A ).parent
_UpperCamelCase = Path(_A ).name
_UpperCamelCase = OnnxRuntimeModel.load_model(_A , provider=_A , sess_options=_A )
return cls(model=_A , **_A )
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , _A : Union[str, Path] , _A : bool = True , _A : Optional[str] = None , _A : Optional[str] = None , **_A : List[str] , ):
_UpperCamelCase = None
if len(str(_A ).split('''@''' ) ) == 2:
_UpperCamelCase , _UpperCamelCase = model_id.split('''@''' )
return cls._from_pretrained(
model_id=_A , revision=_A , cache_dir=_A , force_download=_A , use_auth_token=_A , **_A , )
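# A hedged usage sketch for the wrapper above, assuming the method names of
# the original diffusers implementation (`from_pretrained` and `__call__`)
# and a local directory that already contains an exported `model.onnx`:
#
#   model = OnnxRuntimeModel.from_pretrained("./my-onnx-model")
#   outputs = model(sample=np.ones((1, 4, 64, 64), dtype=np.float32))
#
# Keyword inputs are converted to numpy arrays and forwarded to
# `onnxruntime.InferenceSession.run`.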
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_lowerCAmelCase = True
from torch.cuda.amp import autocast
_lowerCAmelCase = logging.getLogger(__name__)
def _snake_case ( __snake_case=None , __snake_case=None ):
return field(default_factory=lambda: default , metadata=__snake_case )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."} )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
UpperCAmelCase = field(
default=0.1, metadata={
"help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
}, )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."}, )
UpperCAmelCase = field(
default=0.0_5, metadata={
"help": (
"Propability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
)
}, )
UpperCAmelCase = field(default=0.0, metadata={"help": "The LayerDrop probability."} )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
UpperCAmelCase = field(
default="train+validation", metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "The number of processes to use for the preprocessing."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
}, )
UpperCAmelCase = list_field(
default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"], metadata={"help": "A list of characters to remove from the transcripts."}, )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = 42
UpperCAmelCase = True
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
def __call__( self : Union[str, Any] , _A : List[Dict[str, Union[List[int], torch.Tensor]]] ):
        # split inputs and labels since they have to be of different lengths and need
# different padding methods
_UpperCamelCase = [{'''input_values''': feature['''input_values''']} for feature in features]
_UpperCamelCase = [{'''input_ids''': feature['''labels''']} for feature in features]
_UpperCamelCase = self.processor.pad(
_A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
_UpperCamelCase = self.processor.pad(
labels=_A , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , )
# replace padding with -100 to ignore loss correctly
_UpperCamelCase = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
_UpperCamelCase = labels
return batch
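# A small, self-contained illustration of the -100 trick used in the
# collator above: positions where the label attention mask is 0 are set to
# -100, and the model excludes negative labels when computing the CTC loss,
# so padding never contributes to it.
_demo_labels = torch.tensor([[5, 7, 0, 0]])  # 0 = pad token id
_demo_mask = torch.tensor([[1, 1, 0, 0]])
print(_demo_labels.masked_fill(_demo_mask.ne(1), -100))
# tensor([[   5,    7, -100, -100]])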
class lowerCAmelCase_ ( __lowercase ):
def UpperCamelCase_ ( self : Dict , _A : nn.Module , _A : Dict[str, Union[torch.Tensor, Any]] ):
model.train()
_UpperCamelCase = self._prepare_inputs(_A )
if self.use_amp:
with autocast():
_UpperCamelCase = self.compute_loss(_A , _A )
else:
_UpperCamelCase = self.compute_loss(_A , _A )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
_UpperCamelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_UpperCamelCase = loss.sum() / (inputs['''labels'''] >= 0).sum()
else:
raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
_UpperCamelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(_A ).backward()
elif self.use_apex:
with amp.scale_loss(_A , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(_A )
else:
loss.backward()
return loss.detach()
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
_UpperCamelCase = datasets.load_dataset(
'''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
_UpperCamelCase = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
# Create and save tokenizer
_UpperCamelCase = f"""[{"".join(data_args.chars_to_ignore )}]"""
def remove_special_characters(__snake_case ):
_UpperCamelCase = re.sub(__snake_case , '''''' , batch['''sentence'''] ).lower() + ''' '''
return batch
_UpperCamelCase = train_dataset.map(__snake_case , remove_columns=['''sentence'''] )
_UpperCamelCase = eval_dataset.map(__snake_case , remove_columns=['''sentence'''] )
def extract_all_chars(__snake_case ):
_UpperCamelCase = ''' '''.join(batch['''text'''] )
_UpperCamelCase = list(set(__snake_case ) )
return {"vocab": [vocab], "all_text": [all_text]}
_UpperCamelCase = train_dataset.map(
__snake_case , batched=__snake_case , batch_size=-1 , keep_in_memory=__snake_case , remove_columns=train_dataset.column_names , )
_UpperCamelCase = train_dataset.map(
__snake_case , batched=__snake_case , batch_size=-1 , keep_in_memory=__snake_case , remove_columns=eval_dataset.column_names , )
_UpperCamelCase = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
_UpperCamelCase = {v: k for k, v in enumerate(__snake_case )}
_UpperCamelCase = vocab_dict[''' ''']
del vocab_dict[" "]
_UpperCamelCase = len(__snake_case )
_UpperCamelCase = len(__snake_case )
with open('''vocab.json''' , '''w''' ) as vocab_file:
json.dump(__snake_case , __snake_case )
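    # For a toy corpus such as "abc ab" this construction yields (up to set
    # iteration order)
    #   {"a": 1, "b": 2, "c": 3, "|": 0, "[UNK]": 4, "[PAD]": 5}:
    # the space character is remapped to the CTC word delimiter "|", and the
    # original Common Voice script appends [UNK] and [PAD] before dumping the
    # vocabulary to vocab.json.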
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = WavaVecaCTCTokenizer(
'''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
_UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0.0 , do_normalize=__snake_case , return_attention_mask=__snake_case )
_UpperCamelCase = WavaVecaProcessor(feature_extractor=__snake_case , tokenizer=__snake_case )
_UpperCamelCase = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
_UpperCamelCase = min(len(__snake_case ) , data_args.max_train_samples )
_UpperCamelCase = train_dataset.select(range(__snake_case ) )
if data_args.max_val_samples is not None:
_UpperCamelCase = eval_dataset.select(range(data_args.max_val_samples ) )
_UpperCamelCase = torchaudio.transforms.Resample(48000 , 16000 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(__snake_case ):
_UpperCamelCase , _UpperCamelCase = torchaudio.load(batch['''path'''] )
_UpperCamelCase = resampler(__snake_case ).squeeze().numpy()
_UpperCamelCase = 16000
_UpperCamelCase = batch['''text''']
return batch
_UpperCamelCase = train_dataset.map(
__snake_case , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
__snake_case , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(__snake_case ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
_UpperCamelCase = processor(
audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
batch.update(__snake_case )
return batch
_UpperCamelCase = train_dataset.map(
__snake_case , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
__snake_case , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , )
# Metric
_UpperCamelCase = datasets.load_metric('''wer''' )
def compute_metrics(__snake_case ):
_UpperCamelCase = pred.predictions
_UpperCamelCase = np.argmax(__snake_case , axis=-1 )
_UpperCamelCase = processor.tokenizer.pad_token_id
_UpperCamelCase = processor.batch_decode(__snake_case )
# we do not want to group tokens when computing the metrics
_UpperCamelCase = processor.batch_decode(pred.label_ids , group_tokens=__snake_case )
_UpperCamelCase = wer_metric.compute(predictions=__snake_case , references=__snake_case )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
_UpperCamelCase = DataCollatorCTCWithPadding(processor=__snake_case , padding=__snake_case )
# Initialize our Trainer
_UpperCamelCase = CTCTrainer(
model=__snake_case , data_collator=__snake_case , args=__snake_case , compute_metrics=__snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCamelCase = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
_UpperCamelCase = model_args.model_name_or_path
else:
_UpperCamelCase = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
_UpperCamelCase = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model()
_UpperCamelCase = train_result.metrics
_UpperCamelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__snake_case )
)
_UpperCamelCase = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''train''' , __snake_case )
trainer.save_metrics('''train''' , __snake_case )
trainer.save_state()
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(__snake_case )
_UpperCamelCase = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''eval''' , __snake_case )
trainer.save_metrics('''eval''' , __snake_case )
return results
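# A self-contained sketch of the word-error-rate metric reported above: WER
# is word-level edit distance normalized by reference length, the same
# quantity the `wer` metric loaded via `datasets.load_metric` computes.
def _demo_wer(reference: str, hypothesis: str) -> float:
    r, h = reference.split(), hypothesis.split()
    d = [[0] * (len(h) + 1) for _ in range(len(r) + 1)]
    for i in range(len(r) + 1):
        d[i][0] = i  # deleting i reference words
    for j in range(len(h) + 1):
        d[0][j] = j  # inserting j hypothesis words
    for i in range(1, len(r) + 1):
        for j in range(1, len(h) + 1):
            cost = 0 if r[i - 1] == h[j - 1] else 1
            d[i][j] = min(d[i - 1][j] + 1, d[i][j - 1] + 1, d[i - 1][j - 1] + cost)
    return d[len(r)][len(h)] / len(r)

print(_demo_wer("i was born in germany", "i was borne in germany"))  # 0.2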
if __name__ == "__main__":
main()
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
def _snake_case ( __snake_case=None , __snake_case=None ):
return field(default_factory=lambda: default , metadata=__snake_case )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = list_field(
default=[], metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
}, )
UpperCAmelCase = list_field(
default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
UpperCAmelCase = list_field(
default=[8, 32, 128, 512], metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Use FP16 to accelerate inference."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Benchmark training of model"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Verbose memory tracing"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
}, )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Trace memory line by line"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Save result to a CSV file"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Save all print statements in a log file"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to print environment information"} )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
}, )
UpperCAmelCase = field(
default=F"""inference_time_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving time results to csv."}, )
UpperCAmelCase = field(
default=F"""inference_memory_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving memory results to csv."}, )
UpperCAmelCase = field(
default=F"""train_time_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving time results to csv for training."}, )
UpperCAmelCase = field(
default=F"""train_memory_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving memory results to csv for training."}, )
UpperCAmelCase = field(
default=F"""env_info_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving environment information."}, )
UpperCAmelCase = field(
default=F"""log_{round(time() )}.csv""", metadata={"help": "Log filename used if print statements are saved in log."}, )
UpperCAmelCase = field(default=3, metadata={"help": "Times an experiment will be run."} )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
}, )
def UpperCamelCase_ ( self : Union[str, Any] ):
warnings.warn(
F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
''' to benchmark Transformer models.''' , _A , )
def UpperCamelCase_ ( self : str ):
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def UpperCamelCase_ ( self : List[Any] ):
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def UpperCamelCase_ ( self : Optional[int] ):
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
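# A standalone sketch of the `list_field` helper defined at the top of this
# file: dataclasses reject plain mutable defaults, so the list is wrapped in
# a default_factory. Note the lambda closes over a single list object, so
# the default should be treated as read-only.
import dataclasses as _demo_dataclasses

def _demo_list_field(default=None, metadata=None):
    return _demo_dataclasses.field(default_factory=lambda: default, metadata=metadata)

@_demo_dataclasses.dataclass
class _DemoArgs:
    batch_sizes: list = _demo_list_field(default=[8])

print(_DemoArgs().batch_sizes)  # [8]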
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j
        for i in range(0, n):
            self.dp[i][i] = 0  # a node is at distance 0 from itself

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        # Classic relaxation: allow each node k in turn as an intermediate
        # stop, keeping dp[i][j] = min(dp[i][j], dp[i][k] + dp[k][j]).
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
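    # Extra check: after floyd_warshall, a negative entry on the diagonal of
    # dp would indicate a negative-weight cycle in the graph.
    print(any(graph.dp[i][i] < 0 for i in range(graph.n)))  # False for this graph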
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"vocab_file": "vocab.json"}
_lowerCAmelCase = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
_lowerCAmelCase = {"mgp-str": 27}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Any , _A : List[str] , _A : int="[GO]" , _A : Union[str, Any]="[GO]" , _A : Any="[s]" , _A : Dict="[GO]" , **_A : Optional[Any] ):
super().__init__(
unk_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , **_A , )
with open(_A , encoding='''utf-8''' ) as vocab_handle:
_UpperCamelCase = json.load(_A )
_UpperCamelCase = {v: k for k, v in self.vocab.items()}
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return len(self.vocab )
def UpperCamelCase_ ( self : int ):
return dict(self.vocab , **self.added_tokens_encoder )
def UpperCamelCase_ ( self : Union[str, Any] , _A : Dict ):
        char_tokens = []
        for s in _A:
            char_tokens.extend(s)
        return char_tokens
def UpperCamelCase_ ( self : str , _A : Dict ):
return self.vocab.get(_A , self.vocab.get(self.unk_token ) )
def UpperCamelCase_ ( self : Dict , _A : List[str] ):
return self.decoder.get(_A )
def UpperCamelCase_ ( self : Any , _A : str , _A : Optional[str] = None ):
if not os.path.isdir(_A ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(_A ) )
return
_UpperCamelCase = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
with open(_A , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=_A , ensure_ascii=_A ) + '''\n''' )
return (vocab_file,)
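# A toy round-trip sketch of the character-level scheme implemented above,
# with an invented vocabulary standing in for the real mgp-str vocab.json
# ("[GO]" doubles as the unk token, as in the tokenizer's defaults).
_demo_vocab = {"[GO]": 0, "a": 1, "b": 2}
_demo_decoder = {v: k for k, v in _demo_vocab.items()}
_demo_ids = [_demo_vocab.get(ch, _demo_vocab["[GO]"]) for ch in "ab"]
print(_demo_ids, "".join(_demo_decoder[i] for i in _demo_ids))  # [1, 2] ab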
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_lowerCAmelCase = "src/diffusers"
_lowerCAmelCase = "."
# This is to make sure the diffusers module imported is the one in the repo.
_lowerCAmelCase = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
_lowerCAmelCase = spec.loader.load_module()
def _snake_case ( __snake_case , __snake_case ):
return line.startswith(__snake_case ) or len(__snake_case ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , __snake_case ) is not None
def _snake_case ( __snake_case ):
_UpperCamelCase = object_name.split('''.''' )
_UpperCamelCase = 0
# First let's find the module where our object lives.
_UpperCamelCase = parts[i]
while i < len(__snake_case ) and not os.path.isfile(os.path.join(__snake_case , f"""{module}.py""" ) ):
i += 1
if i < len(__snake_case ):
_UpperCamelCase = os.path.join(__snake_case , parts[i] )
if i >= len(__snake_case ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(__snake_case , f"""{module}.py""" ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
_UpperCamelCase = f.readlines()
# Now let's find the class / func in the code!
_UpperCamelCase = ''''''
_UpperCamelCase = 0
for name in parts[i + 1 :]:
while (
line_index < len(__snake_case ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(__snake_case ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_UpperCamelCase = line_index
while line_index < len(__snake_case ) and _should_continue(lines[line_index] , __snake_case ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_UpperCamelCase = lines[start_index:line_index]
return "".join(__snake_case )
_lowerCAmelCase = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_lowerCAmelCase = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_lowerCAmelCase = re.compile(r"<FILL\s+[^>]*>")
def _snake_case ( __snake_case ):
_UpperCamelCase = code.split('''\n''' )
_UpperCamelCase = 0
while idx < len(__snake_case ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(__snake_case ):
return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0]
return ""
def _snake_case ( __snake_case ):
_UpperCamelCase = len(get_indent(__snake_case ) ) > 0
if has_indent:
_UpperCamelCase = f"""class Bla:\n{code}"""
    _UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=__snake_case )
_UpperCamelCase = black.format_str(__snake_case , mode=__snake_case )
_UpperCamelCase , _UpperCamelCase = style_docstrings_in_code(__snake_case )
return result[len('''class Bla:\n''' ) :] if has_indent else result
def _snake_case ( __snake_case , __snake_case=False ):
with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
_UpperCamelCase = f.readlines()
_UpperCamelCase = []
_UpperCamelCase = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(__snake_case ):
_UpperCamelCase = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = search.groups()
_UpperCamelCase = find_code_in_diffusers(__snake_case )
_UpperCamelCase = get_indent(__snake_case )
_UpperCamelCase = line_index + 1 if indent == theoretical_indent else line_index + 2
_UpperCamelCase = theoretical_indent
_UpperCamelCase = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
_UpperCamelCase = True
while line_index < len(__snake_case ) and should_continue:
line_index += 1
if line_index >= len(__snake_case ):
break
_UpperCamelCase = lines[line_index]
_UpperCamelCase = _should_continue(__snake_case , __snake_case ) and re.search(f"""^{indent}# End copy""" , __snake_case ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_UpperCamelCase = lines[start_index:line_index]
_UpperCamelCase = ''''''.join(__snake_case )
# Remove any nested `Copied from` comments to avoid circular copies
_UpperCamelCase = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(__snake_case ) is None]
_UpperCamelCase = '''\n'''.join(__snake_case )
# Before comparing, use the `replace_pattern` on the original code.
if len(__snake_case ) > 0:
_UpperCamelCase = replace_pattern.replace('''with''' , '''''' ).split(''',''' )
_UpperCamelCase = [_re_replace_pattern.search(__snake_case ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = pattern.groups()
_UpperCamelCase = re.sub(__snake_case , __snake_case , __snake_case )
if option.strip() == "all-casing":
_UpperCamelCase = re.sub(obja.lower() , obja.lower() , __snake_case )
_UpperCamelCase = re.sub(obja.upper() , obja.upper() , __snake_case )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_UpperCamelCase = blackify(lines[start_index - 1] + theoretical_code )
_UpperCamelCase = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_UpperCamelCase = lines[:start_index] + [theoretical_code] + lines[line_index:]
_UpperCamelCase = start_index + 1
if overwrite and len(__snake_case ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(__snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(__snake_case )
return diffs
def _snake_case ( __snake_case = False ):
_UpperCamelCase = glob.glob(os.path.join(__snake_case , '''**/*.py''' ) , recursive=__snake_case )
_UpperCamelCase = []
for filename in all_files:
_UpperCamelCase = is_copy_consistent(__snake_case , __snake_case )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(__snake_case ) > 0:
_UpperCamelCase = '''\n'''.join(__snake_case )
raise Exception(
'''Found the following copy inconsistencies:\n'''
+ diff
+ '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_lowerCAmelCase = parser.parse_args()
check_copies(args.fix_and_overwrite)
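# A standalone sketch of the rename-pattern syntax consumed above: each
# "Old->New" pair (optionally followed by "all-casing") is extracted with
# _re_replace_pattern and applied to the retrieved source before comparison.
import re as _demo_re

_demo_match = _demo_re.search(r"^\s*(\S+)->(\S+)(\s+.*|$)", "DDPM->Test all-casing")
print(_demo_match.groups())  # ('DDPM', 'Test', ' all-casing')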
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _snake_case ( *__snake_case , __snake_case = None , __snake_case=True , __snake_case=2 ):
from .. import __version__
_UpperCamelCase = take_from
_UpperCamelCase = ()
if not isinstance(args[0] , __snake_case ):
_UpperCamelCase = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(__snake_case ).base_version ) >= version.parse(__snake_case ):
raise ValueError(
f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
f""" version {__version__} is >= {version_name}""" )
_UpperCamelCase = None
if isinstance(__snake_case , __snake_case ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(__snake_case ),)
_UpperCamelCase = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
elif hasattr(__snake_case , __snake_case ):
values += (getattr(__snake_case , __snake_case ),)
_UpperCamelCase = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
elif deprecated_kwargs is None:
_UpperCamelCase = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
if warning is not None:
_UpperCamelCase = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , __snake_case , stacklevel=__snake_case )
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) > 0:
_UpperCamelCase = inspect.getouterframes(inspect.currentframe() )[1]
_UpperCamelCase = call_frame.filename
_UpperCamelCase = call_frame.lineno
_UpperCamelCase = call_frame.function
_UpperCamelCase , _UpperCamelCase = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
if len(__snake_case ) == 0:
return
elif len(__snake_case ) == 1:
return values[0]
return values
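# A hedged usage sketch for the helper above (named `deprecate` in the
# original diffusers source): it pops a deprecated kwarg, emits a
# deprecation warning, and hands the value back so call sites keep working.
#
#   kwargs = {"old_arg": 3}
#   value = deprecate("old_arg", "999.0.0", "Use `new_arg` instead.", take_from=kwargs)
#   # value == 3, kwargs == {}, and a warning pointing at the caller is emitted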
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))
def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 10 | import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowerCAmelCase = logging.getLogger(__name__)
def _snake_case ( __snake_case , __snake_case ):
return (preds == labels).mean()
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128, metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed
set_seed(training_args.seed )
try:
_UpperCamelCase = processors[data_args.task_name]()
_UpperCamelCase = processor.get_labels()
_UpperCamelCase = len(__snake_case )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__snake_case , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
_UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCamelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , )
# Get datasets
_UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__snake_case , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__snake_case , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(__snake_case ) -> Dict:
_UpperCamelCase = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(__snake_case , p.label_ids )}
# Data collator
_UpperCamelCase = DataCollatorWithPadding(__snake_case , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_UpperCamelCase = Trainer(
model=__snake_case , args=__snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , compute_metrics=__snake_case , data_collator=__snake_case , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
                results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
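# A hypothetical invocation sketch (paths and the task name are illustrative,
# not taken from this file; "swag" is one of the tasks registered in
# utils_multiple_choice.processors):
#
#   python run_multiple_choice.py \
#       --task_name swag \
#       --model_name_or_path bert-base-uncased \
#       --data_dir ./data/swag \
#       --output_dir ./swag_out \
#       --max_seq_length 80 \
#       --do_train --do_eval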
| 10 | 1 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """
    Mark the function with a key code so it can be dispatched by the handler.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with several key codes so it can be dispatched by the handler.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """
    Metaclass that collects the marked methods into a ``key_handler`` table.
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one character and dispatch to the handler registered for it."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Apply the KeyHandler metaclass to an existing class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
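# A usage sketch (class and key binding invented for illustration): methods
# decorated with @mark are collected into ``key_handler`` by the metaclass,
# and ``handle_input`` dispatches one keypress to the matching method.
#
#   @register
#   class Menu:
#       @mark(KEYMAP["q"])  # assumes "q" is a key code defined in KEYMAP
#       def quit(self):
#           return "quit"
#
#   # Menu.handle_input(Menu) returns "quit" when the user presses "q".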
 | 10 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16,
        decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1,
        attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02,
        decoder_layerdrop=0.0, use_cache=True, scale_embedding=False,
        use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1,
        bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
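# A minimal usage sketch for the config above; ``hidden_size`` resolves to
# ``d_model`` through ``attribute_map``:
#
#   config = TrOCRConfig(d_model=256, decoder_layers=6)
#   assert config.hidden_size == 256
#   assert config.num_hidden_layers == 6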
| 10 | 1 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = "▁"
_lowerCAmelCase = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = BertGenerationTokenizer
UpperCAmelCase = False
UpperCAmelCase = True
def UpperCamelCase_ ( self : List[str] ):
super().setUp()
_UpperCamelCase = BertGenerationTokenizer(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = '''<s>'''
_UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(_A ) , 1002 )
def UpperCamelCase_ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = BertGenerationTokenizer(_A , keep_accents=_A )
_UpperCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [285, 46, 10, 170, 382] , )
_UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_UpperCamelCase = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def UpperCamelCase_ ( self : Union[str, Any] ):
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''Hello World!'''
_UpperCamelCase = [1_8536, 2260, 101]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
_UpperCamelCase = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def UpperCamelCase_ ( self : Dict ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
_UpperCamelCase = list(self.big_tokenizer.get_vocab().keys() )[:10]
_UpperCamelCase = ''' '''.join(_A )
_UpperCamelCase = self.big_tokenizer.encode_plus(_A , return_tensors='''pt''' , return_token_type_ids=_A )
_UpperCamelCase = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_A )
_UpperCamelCase = BertGenerationConfig()
_UpperCamelCase = BertGenerationEncoder(_A )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_A )
model(**_A )
@slow
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = {'''input_ids''': [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
 | 10 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : Union[str, Any] , _A : Optional[Any] , _A : Any=13 , _A : Union[str, Any]=7 , _A : List[str]=True , _A : List[str]=True , _A : List[str]=True , _A : List[str]=True , _A : List[Any]=True , _A : Optional[int]=False , _A : Any=False , _A : int=False , _A : Optional[Any]=2 , _A : Any=99 , _A : str=0 , _A : Union[str, Any]=32 , _A : List[Any]=5 , _A : Tuple=4 , _A : List[str]=0.1 , _A : Union[str, Any]=0.1 , _A : int=512 , _A : Union[str, Any]=12 , _A : List[str]=2 , _A : int=0.02 , _A : Optional[Any]=3 , _A : Any=4 , _A : Optional[int]="last" , _A : Any=None , _A : Dict=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_lengths
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = gelu_activation
_UpperCamelCase = sinusoidal_embeddings
_UpperCamelCase = causal
_UpperCamelCase = asm
_UpperCamelCase = n_langs
_UpperCamelCase = vocab_size
_UpperCamelCase = n_special
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = summary_type
_UpperCamelCase = use_proj
_UpperCamelCase = scope
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_input_lengths:
_UpperCamelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , 2 ).float()
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase_ ( self : str ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCamelCase_ ( self : str , _A : Union[str, Any] , _A : Optional[Any] , _A : str , _A : Tuple , _A : List[str] , _A : List[Any] , _A : Any , _A : str , _A : Optional[int] , ):
_UpperCamelCase = FlaubertModel(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , lengths=_A , langs=_A )
_UpperCamelCase = model(_A , langs=_A )
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple , _A : List[Any] , _A : str , _A : Optional[int] , _A : Optional[Any] , _A : List[str] , _A : int , _A : str , _A : List[Any] , _A : Any , ):
_UpperCamelCase = FlaubertWithLMHeadModel(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Tuple , _A : List[str] , _A : List[str] , _A : Optional[Any] , _A : Union[str, Any] , _A : str , _A : List[str] , _A : Tuple , _A : Optional[int] , _A : Dict , ):
_UpperCamelCase = FlaubertForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A , start_positions=_A , end_positions=_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : Tuple , _A : str , _A : Tuple , _A : Tuple , _A : Union[str, Any] , _A : List[str] , _A : int , _A : str , _A : Dict , _A : List[Any] , ):
_UpperCamelCase = FlaubertForQuestionAnswering(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
_UpperCamelCase = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
((_UpperCamelCase) , ) = result_with_labels.to_tuple()
_UpperCamelCase = model(_A , start_positions=_A , end_positions=_A )
((_UpperCamelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCamelCase_ ( self : List[Any] , _A : Union[str, Any] , _A : Tuple , _A : str , _A : int , _A : int , _A : Optional[int] , _A : Optional[int] , _A : int , _A : List[str] , ):
_UpperCamelCase = FlaubertForSequenceClassification(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self : Optional[int] , _A : List[str] , _A : Optional[Any] , _A : str , _A : Union[str, Any] , _A : List[Any] , _A : int , _A : List[Any] , _A : str , _A : List[str] , ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = FlaubertForTokenClassification(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Tuple , _A : Dict , _A : str , _A : Optional[Any] , _A : List[str] , _A : Any , _A : Optional[int] , _A : Optional[Any] , _A : List[Any] , _A : List[str] , ):
_UpperCamelCase = self.num_choices
_UpperCamelCase = FlaubertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = config_and_inputs
_UpperCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase_ ( self : Union[str, Any] , _A : Dict , _A : Dict , _A : Tuple , _A : int , _A : Any ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCamelCase_ ( self : str , _A : Any , _A : List[str] , _A : Optional[int]=False ):
_UpperCamelCase = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = FlaubertModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , emb_dim=37 )
def UpperCamelCase_ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_A )
@slow
def UpperCamelCase_ ( self : str ):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = FlaubertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@slow
@require_torch_gpu
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
_UpperCamelCase = True
_UpperCamelCase = model_class(config=_A )
_UpperCamelCase = self._prepare_for_class(_A , _A )
_UpperCamelCase = torch.jit.trace(
_A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_A , os.path.join(_A , '''traced_model.pt''' ) )
_UpperCamelCase = torch.jit.load(os.path.join(_A , '''traced_model.pt''' ) , map_location=_A )
loaded(inputs_dict['''input_ids'''].to(_A ) , inputs_dict['''attention_mask'''].to(_A ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
_UpperCamelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
_UpperCamelCase = model(_A )[0]
_UpperCamelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _A )
_UpperCamelCase = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _A , atol=1e-4 ) )
| 10 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
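    # With the lazy structure above, ``from transformers.models.ctrl import CTRLModel``
    # resolves through _LazyModule, so torch is only imported on first attribute access.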
 | 10 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
def __init__( self : Any , _A : int , _A : int=12 , _A : int=7 , _A : Tuple=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : str=99 , _A : str=32 , _A : int=32 , _A : Optional[Any]=2 , _A : Dict=4 , _A : int=37 , _A : List[Any]=0.1 , _A : str=0.1 , _A : Any=512 , _A : int=0.02 , _A : Optional[Any]=0 , _A : Dict=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = projection_dim
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
_UpperCamelCase = bos_token_id
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
_UpperCamelCase = input_mask.numpy()
_UpperCamelCase , _UpperCamelCase = input_mask.shape
_UpperCamelCase = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_A ):
_UpperCamelCase = 1
_UpperCamelCase = 0
_UpperCamelCase = self.get_config()
return config, input_ids, tf.convert_to_tensor(_A )
def UpperCamelCase_ ( self : str ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def UpperCamelCase_ ( self : List[str] , _A : Tuple , _A : str , _A : Optional[Any] ):
_UpperCamelCase = TFBlipTextModel(config=_A )
_UpperCamelCase = model(_A , attention_mask=_A , training=_A )
_UpperCamelCase = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = (TFBlipTextModel,) if is_tf_available() else ()
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = BlipTextModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCamelCase_ ( self : Dict ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCamelCase_ ( self : List[Any] ):
pass
def UpperCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : List[str] ):
pass
@slow
def UpperCamelCase_ ( self : Optional[int] ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFBlipTextModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def UpperCamelCase_ ( self : int , _A : Optional[int]=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=_A )
| 10 | 1 |
import math
def is_prime(number: int) -> bool:
    """Checks whether ``number`` is prime in O(sqrt(n)) trial divisions."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1, since 6k, 6k +/- 2,
    # and 6k + 3 are divisible by 2 or 3.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """
    Project Euler 58: returns the side length of the square spiral at which
    the ratio of primes along both diagonals first falls below ``ratio``.
    """
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
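# Worked example for solution(), traced by hand: starting from primes = 3 on
# the side-3 spiral, the next four layers contribute 2, 3, 1, and 1 primes,
# and the ratio first drops below one half at side length 11, so
# solution(0.5) == 11.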
if __name__ == "__main__":
import doctest
doctest.testmod()
 | 10 |
from __future__ import annotations
sieve = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if sieve[i]:
        for j in range(i * i, 1_000_001, i):
            sieve[j] = False
    i += 1


def is_prime(n: int) -> bool:
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def count_circular_primes() -> int:
    return len(find_circular_primes())
if __name__ == "__main__":
print(f'{len(find_circular_primes()) = }')
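# Worked example: 197 is a circular prime because every rotation (197, 971,
# 719) is prime; 19 is not one, since its rotation 91 = 7 * 13 is composite.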
| 10 | 1 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_DESCRIPTION = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_KWARGS_DESCRIPTION = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n    predictions: list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references: list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    max_order: Maximum n-gram order to use when computing BLEU score.\n    smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n    'bleu': bleu score,\n    'precisions': geometric mean of n-gram precisions,\n    'brevity_penalty': brevity penalty,\n    'length_ratio': ratio of lengths,\n    'translation_length': translation_length,\n    'reference_length': reference_length\nExamples:\n\n    >>> predictions = [\n    ...     [\"hello\", \"there\", \"general\", \"kenobi\"],  # tokenized prediction of the first sample\n    ...     [\"foo\", \"bar\", \"foobar\"]  # tokenized prediction of the second sample\n    ... ]\n    >>> references = [\n    ...     [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]],  # tokenized references for the first sample (2 references)\n    ...     [[\"foo\", \"bar\", \"foobar\"]]  # tokenized references for the second sample (1 reference)\n    ... ]\n    >>> bleu = datasets.load_metric(\"bleu\")\n    >>> results = bleu.compute(predictions=predictions, references=references)\n    >>> print(results[\"bleu\"])\n    1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
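# Reference note for the fields returned above: the brevity penalty is
# BP = 1 if c > r else exp(1 - r / c), where c is the total candidate length
# and r the effective reference length, and bleu = BP times the geometric
# mean of the n-gram precisions.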
 | 10 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = DebertaVaTokenizer
UpperCAmelCase = DebertaVaTokenizerFast
UpperCAmelCase = True
UpperCAmelCase = True
def UpperCamelCase_ ( self : List[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase = DebertaVaTokenizer(_A , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Dict , _A : Union[str, Any] ):
_UpperCamelCase = '''this is a test'''
_UpperCamelCase = '''this is a test'''
return input_text, output_text
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(_A ) , 3_0001 )
def UpperCamelCase_ ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def UpperCamelCase_ ( self : List[str] ):
# fmt: off
_UpperCamelCase = ''' \tHeLLo!how \n Are yoU? '''
_UpperCamelCase = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase_ ( self : Optional[Any] ):
pass
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[Any] ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : int ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Tuple ):
# fmt: off
_UpperCamelCase = ''' \tHeLLo!how \n Are yoU? '''
_UpperCamelCase = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = tokenizer.encode(_A )
_UpperCamelCase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = '''This is a test'''
_UpperCamelCase = [13, 1, 4398, 25, 21, 1289]
_UpperCamelCase = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
_UpperCamelCase = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
_UpperCamelCase = DebertaVaTokenizer(_A , keep_accents=_A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , keep_accents=_A )
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
_UpperCamelCase = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = DebertaVaTokenizer(_A )
_UpperCamelCase = tokenizer.encode('''sequence builders''' )
_UpperCamelCase = tokenizer.encode('''multi-sequence build''' )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_A )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_A , _A )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _A )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _A , )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
# fmt: off
_UpperCamelCase = {'''input_ids''': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 10 | 1 |
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
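# A minimal usage sketch for the class above (values checked by hand). Note
# that scalar multiplication casts entries to int, so inverse() truncates
# fractional entries:
#
#   m = Matrix([[2, 0], [0, 2]])
#   m.determinant()   # -> 4
#   (m * m).rows      # -> [[4, 0], [0, 4]]
#   (m ** -1).rows    # -> [[0, 0], [0, 0]] because of the int() cast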
if __name__ == "__main__":
import doctest
doctest.testmod()
 | 10 |
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def _snake_case ( __snake_case ):
_UpperCamelCase = Heap()
_UpperCamelCase = [0] * len(__snake_case )
_UpperCamelCase = [-1] * len(__snake_case ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
_UpperCamelCase = [] # Heap of Distance of vertices from their neighboring vertex
_UpperCamelCase = []
for vertex in range(len(__snake_case ) ):
distance_tv.append(sys.maxsize )
positions.append(__snake_case )
heap.node_position.append(__snake_case )
_UpperCamelCase = []
_UpperCamelCase = 1
_UpperCamelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
_UpperCamelCase = 0
_UpperCamelCase = distance
heap.heapify(__snake_case , __snake_case )
for _ in range(1 , len(__snake_case ) ):
_UpperCamelCase = heap.delete_minimum(__snake_case , __snake_case )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
_UpperCamelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(__snake_case )]
):
_UpperCamelCase = distance
heap.bottom_to_top(
__snake_case , heap.get_position(__snake_case ) , __snake_case , __snake_case )
_UpperCamelCase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_lowerCAmelCase = int(input("Enter number of edges: ").strip())
_lowerCAmelCase = defaultdict(list)
for _ in range(edges_number):
_lowerCAmelCase = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
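    # Example session (hypothetical input; each edge line is `u v weight`):
    #   Enter number of edges: 3
    #   0 1 1
    #   1 2 2
    #   0 2 3
    # prints [(0, 1), (1, 2)], the minimum spanning tree of this triangle graph.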
| 10 | 1 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
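# Minimal usage sketch (assumes an active SparkSession bound to `spark`;
# behavior follows the `read` method above):
#   reader = SparkDatasetReader(spark.range(100).toDF("id"), cache_dir="/tmp/hf")
#   ds = reader.read()  # streaming=True by default, so this returns a streaming dataset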
| 10 | import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        # Check whether this process should emit the record
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                '''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''')
        main_process_only = kwargs.pop('''main_process_only''', True)
        in_order = kwargs.pop('''in_order''', False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name, log_level=None):
    if log_level is None:
        log_level = os.environ.get('''ACCELERATE_LOG_LEVEL''', None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
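# Usage sketch (requires `Accelerator()` or `PartialState()` to have been
# constructed first, as enforced by the RuntimeError above):
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed once", main_process_only=True)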
| 10 | 1 |
def gcd(a, b):
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a, m):
    if gcd(a, m) != 1:
        msg = f"""mod inverse of {a!r} and {m!r} does not exist"""
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
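# Quick check: 3 * 5 == 15 == 2 * 7 + 1, so mod_inverse(3, 7) returns 5;
# mod_inverse(2, 4) raises ValueError because gcd(2, 4) != 1.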
| 10 | import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = "▁"
_lowerCAmelCase = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = BertGenerationTokenizer
UpperCAmelCase = False
UpperCAmelCase = True
def UpperCamelCase_ ( self : List[str] ):
super().setUp()
_UpperCamelCase = BertGenerationTokenizer(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = '''<s>'''
_UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(_A ) , 1002 )
def UpperCamelCase_ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = BertGenerationTokenizer(_A , keep_accents=_A )
_UpperCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [285, 46, 10, 170, 382] , )
_UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_UpperCamelCase = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def UpperCamelCase_ ( self : Union[str, Any] ):
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''Hello World!'''
_UpperCamelCase = [1_8536, 2260, 101]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
_UpperCamelCase = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def UpperCamelCase_ ( self : Dict ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
_UpperCamelCase = list(self.big_tokenizer.get_vocab().keys() )[:10]
_UpperCamelCase = ''' '''.join(_A )
_UpperCamelCase = self.big_tokenizer.encode_plus(_A , return_tensors='''pt''' , return_token_type_ids=_A )
_UpperCamelCase = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_A )
_UpperCamelCase = BertGenerationConfig()
_UpperCamelCase = BertGenerationEncoder(_A )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_A )
model(**_A )
@slow
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = {'''input_ids''': [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
| 10 | 1 |
def solution(n=100):
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(f'{solution() = }')
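# For n = 10 the square of the sum is 3025 and the sum of the squares is 385,
# so solution(10) == 2640; the default n = 100 gives 25164150.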
| 10 | import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCAmelCase_ ( __lowercase, __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = StableUnCLIPPipeline
UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
UpperCAmelCase = False
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = 32
_UpperCamelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=_A , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_A , num_layers=1 , )
torch.manual_seed(0 )
_UpperCamelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=_A , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
_UpperCamelCase = StableUnCLIPImageNormalizer(embedding_dim=_A )
_UpperCamelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_A , layers_per_block=1 , upcast_attention=_A , use_linear_projection=_A , )
torch.manual_seed(0 )
_UpperCamelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_A , steps_offset=1 , )
torch.manual_seed(0 )
_UpperCamelCase = AutoencoderKL()
_UpperCamelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Dict=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_A )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_A )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
_UpperCamelCase = pipe('''anime turle''' , generator=_A , output_type='''np''' )
_UpperCamelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
_UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
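        # `enable_sequential_cpu_offload` keeps only the submodule currently
        # executing on the GPU, which is why peak allocation stays under the
        # 7 * 10**9 byte (~6.5 GiB) budget asserted above.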
| 10 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCAmelCase = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
_UpperCamelCase = self.diffusers_dir
shutil.copy(
os.path.join(_A , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def UpperCamelCase_ ( self : str , _A : List[str] , _A : Optional[Any] , _A : List[str] , _A : Optional[int]=None ):
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_UpperCamelCase = black.format_str(_A , mode=_A )
_UpperCamelCase = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(_A , '''w''' , newline='''\n''' ) as f:
f.write(_A )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_A ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_A )
with open(_A , '''r''' ) as f:
self.assertTrue(f.read() , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , _A , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , _A ) , )
# Copy consistency with a really long name
_UpperCamelCase = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , _A , _A ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , _A , overwrite_result=re.sub('''DDPM''' , '''Test''' , _A ) , )
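    # The headers exercised above have the form
    #   # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput [with A->B]
    # the target object is resolved, the optional `with A->B` rename is applied,
    # and the result must match the decorated code verbatim (or be overwritten).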
| 10 | from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    if dataset.ndim != value_array.ndim:
        msg = (
            '''Wrong input data\'s dimensions... '''
            f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                '''Wrong input data\'s shape... '''
                f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('''Wrong shape''')

    if dataset.dtype != value_array.dtype:
        msg = (
            '''Input data have different datatype... '''
            f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
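# Example: similarity_search(np.array([[0, 0], [1, 1], [2, 2]]), np.array([[0, 1]]))
# returns [[[0, 0], 1.0]] -- the nearest stored vector and its euclidean distance;
# cosine_similarity(np.array([1, 2]), np.array([6, 32])) ~= 0.9615.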
| 10 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "deta"
UpperCAmelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : Optional[int] , _A : Tuple=None , _A : Dict=900 , _A : Union[str, Any]=2048 , _A : Union[str, Any]=6 , _A : List[str]=2048 , _A : str=8 , _A : Optional[int]=6 , _A : List[str]=1024 , _A : Optional[int]=8 , _A : List[str]=0.0 , _A : List[str]=True , _A : Any="relu" , _A : Any=256 , _A : Optional[int]=0.1 , _A : str=0.0 , _A : Dict=0.0 , _A : str=0.02 , _A : Union[str, Any]=1.0 , _A : Union[str, Any]=True , _A : Any=False , _A : Union[str, Any]="sine" , _A : int=5 , _A : Optional[Any]=4 , _A : Any=4 , _A : Union[str, Any]=True , _A : Dict=300 , _A : List[Any]=True , _A : Any=True , _A : Tuple=1 , _A : Optional[int]=5 , _A : str=2 , _A : Tuple=1 , _A : Tuple=1 , _A : Any=5 , _A : Tuple=2 , _A : str=0.1 , _A : List[str]=0.25 , **_A : Dict , ):
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
_UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
else:
if isinstance(_A , _A ):
_UpperCamelCase = backbone_config.pop('''model_type''' )
_UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCamelCase = config_class.from_dict(_A )
_UpperCamelCase = backbone_config
_UpperCamelCase = num_queries
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = d_model
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = encoder_layers
_UpperCamelCase = encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = init_xavier_std
_UpperCamelCase = encoder_layerdrop
_UpperCamelCase = auxiliary_loss
_UpperCamelCase = position_embedding_type
# deformable attributes
_UpperCamelCase = num_feature_levels
_UpperCamelCase = encoder_n_points
_UpperCamelCase = decoder_n_points
_UpperCamelCase = two_stage
_UpperCamelCase = two_stage_num_proposals
_UpperCamelCase = with_box_refine
_UpperCamelCase = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
_UpperCamelCase = class_cost
_UpperCamelCase = bbox_cost
_UpperCamelCase = giou_cost
# Loss coefficients
_UpperCamelCase = mask_loss_coefficient
_UpperCamelCase = dice_loss_coefficient
_UpperCamelCase = bbox_loss_coefficient
_UpperCamelCase = giou_loss_coefficient
_UpperCamelCase = eos_coefficient
_UpperCamelCase = focal_alpha
super().__init__(is_encoder_decoder=_A , **_A )
@property
def UpperCamelCase_ ( self : Optional[int] ):
return self.encoder_attention_heads
@property
def UpperCamelCase_ ( self : int ):
return self.d_model
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = copy.deepcopy(self.__dict__ )
_UpperCamelCase = self.backbone_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
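# Usage sketch (assuming this config is exposed as `DetaConfig`, as its "deta"
# model_type suggests): DetaConfig(two_stage=True, with_box_refine=True) is valid,
# while two_stage=True with with_box_refine=False raises the ValueError above;
# the final to_dict() re-serializes the nested backbone config with the model fields.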
| 10 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = ShapEPipeline
UpperCAmelCase = ["prompt"]
UpperCAmelCase = ["prompt"]
UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
UpperCAmelCase = False
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
return 32
@property
def UpperCamelCase_ ( self : int ):
return 32
@property
def UpperCamelCase_ ( self : List[str] ):
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return 8
@property
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCamelCase_ ( self : List[Any] ):
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCamelCase_ ( self : int ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
_UpperCamelCase = PriorTransformer(**_A )
return model
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
_UpperCamelCase = ShapERenderer(**_A )
return model
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.dummy_prior
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = self.dummy_tokenizer
_UpperCamelCase = self.dummy_renderer
_UpperCamelCase = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , )
_UpperCamelCase = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase_ ( self : Tuple , _A : Tuple , _A : Optional[int]=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = '''cpu'''
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = pipe(**self.get_dummy_inputs(_A ) )
_UpperCamelCase = output.images[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_UpperCamelCase = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self : Any ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = torch_device == '''cpu'''
_UpperCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_A , relax_max_difference=_A , )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = 1
_UpperCamelCase = 2
_UpperCamelCase = self.get_dummy_inputs(_A )
for key in inputs.keys():
if key in self.batch_params:
_UpperCamelCase = batch_size * [inputs[key]]
_UpperCamelCase = pipe(**_A , num_images_per_prompt=_A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
_UpperCamelCase = ShapEPipeline.from_pretrained('''openai/shap-e''' )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(0 )
_UpperCamelCase = pipe(
'''a shark''' , generator=_A , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_A , _A )
| 10 | 1 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
_lowerCAmelCase = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def _snake_case ( __snake_case ):
_UpperCamelCase = {}
state_dict.pop('''pixel_mean''' , __snake_case )
state_dict.pop('''pixel_std''' , __snake_case )
_UpperCamelCase = R'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'''
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_UpperCamelCase = key.replace(__snake_case , __snake_case )
if re.match(__snake_case , __snake_case ):
_UpperCamelCase = int(re.match(__snake_case , __snake_case ).group(2 ) )
if layer_nb == 0:
_UpperCamelCase = key.replace('''layers.0''' , '''proj_in''' )
elif layer_nb == 1:
_UpperCamelCase = key.replace('''layers.1''' , '''layers.0''' )
elif layer_nb == 2:
_UpperCamelCase = key.replace('''layers.2''' , '''proj_out''' )
_UpperCamelCase = value
_UpperCamelCase = model_state_dict[
'''prompt_encoder.shared_embedding.positional_embedding'''
]
return model_state_dict
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case="ybelkada/segment-anything" ):
_UpperCamelCase = hf_hub_download(__snake_case , f"""checkpoints/{model_name}.pth""" )
if "sam_vit_b" in model_name:
_UpperCamelCase = SamConfig()
elif "sam_vit_l" in model_name:
_UpperCamelCase = SamVisionConfig(
hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
_UpperCamelCase = SamConfig(
vision_config=__snake_case , )
elif "sam_vit_h" in model_name:
_UpperCamelCase = SamVisionConfig(
hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
_UpperCamelCase = SamConfig(
vision_config=__snake_case , )
_UpperCamelCase = torch.load(__snake_case , map_location='''cpu''' )
_UpperCamelCase = replace_keys(__snake_case )
_UpperCamelCase = SamImageProcessor()
_UpperCamelCase = SamProcessor(image_processor=__snake_case )
_UpperCamelCase = SamModel(__snake_case )
hf_model.load_state_dict(__snake_case )
_UpperCamelCase = hf_model.to('''cuda''' )
_UpperCamelCase = '''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'''
_UpperCamelCase = Image.open(requests.get(__snake_case , stream=__snake_case ).raw ).convert('''RGB''' )
_UpperCamelCase = [[[400, 650]]]
_UpperCamelCase = [[1]]
_UpperCamelCase = processor(images=np.array(__snake_case ) , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
_UpperCamelCase = hf_model(**__snake_case )
_UpperCamelCase = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579890251159668
_UpperCamelCase = processor(
images=np.array(__snake_case ) , input_points=__snake_case , input_labels=__snake_case , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
_UpperCamelCase = hf_model(**__snake_case )
_UpperCamelCase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712603092193604
_UpperCamelCase = ((75, 275, 1725, 850),)
_UpperCamelCase = processor(images=np.array(__snake_case ) , input_boxes=__snake_case , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
_UpperCamelCase = hf_model(**__snake_case )
_UpperCamelCase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686015605926514
# Test with 2 points and 1 image.
_UpperCamelCase = [[[400, 650], [800, 650]]]
_UpperCamelCase = [[1, 1]]
_UpperCamelCase = processor(
images=np.array(__snake_case ) , input_points=__snake_case , input_labels=__snake_case , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
_UpperCamelCase = hf_model(**__snake_case )
_UpperCamelCase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
_lowerCAmelCase = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
_lowerCAmelCase = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
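    # Example invocation (hypothetical script name and output path):
    #   python convert_sam_to_hf.py --model_name sam_vit_b_01ec64 \
    #       --pytorch_dump_folder_path ./sam-vit-base --push_to_hub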
| 10 | import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_lowerCAmelCase = HfApi()
_lowerCAmelCase = {}
# fmt: off
_lowerCAmelCase = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
_lowerCAmelCase = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
_lowerCAmelCase = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
_lowerCAmelCase = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
_lowerCAmelCase = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
_lowerCAmelCase = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
_lowerCAmelCase = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
_lowerCAmelCase = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
_lowerCAmelCase = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
_lowerCAmelCase = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
_lowerCAmelCase = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
_lowerCAmelCase = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
_lowerCAmelCase = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
_lowerCAmelCase = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
_lowerCAmelCase = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
_lowerCAmelCase = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_lowerCAmelCase = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f'Started running {mod.modelId}!!!')
if mod.modelId.startswith("CompVis"):
_lowerCAmelCase = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
_lowerCAmelCase = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_lowerCAmelCase = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_lowerCAmelCase = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_lowerCAmelCase = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1E-3
)
print(f'{mod.modelId} has passed successfully!!!')
| 10 | 1 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class lowerCAmelCase_ :
def __init__( self : List[Any] , _A : Optional[int] , ):
_UpperCamelCase = parent
_UpperCamelCase = 13
_UpperCamelCase = 7
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = 99
_UpperCamelCase = 32
_UpperCamelCase = 2
_UpperCamelCase = 4
_UpperCamelCase = 37
_UpperCamelCase = '''gelu'''
_UpperCamelCase = 0.1
_UpperCamelCase = 0.1
_UpperCamelCase = 512
_UpperCamelCase = 16
_UpperCamelCase = 2
_UpperCamelCase = 0.02
_UpperCamelCase = 3
_UpperCamelCase = 4
_UpperCamelCase = None
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : Optional[Any] ):
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = self.prepare_config_and_inputs()
_UpperCamelCase = True
_UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase_ ( self : List[str] , _A : List[str] , _A : Dict , _A : List[Any] , _A : Optional[Any] , _A : int , _A : Dict ):
_UpperCamelCase = TFEsmModel(config=_A )
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
_UpperCamelCase = model(_A )
_UpperCamelCase = [input_ids, input_mask]
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : List[str] , _A : List[Any] , _A : Optional[Any] , _A : Optional[int] , _A : Tuple , _A : Dict , _A : Tuple , _A : Tuple , _A : List[Any] , ):
_UpperCamelCase = True
_UpperCamelCase = TFEsmModel(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''encoder_hidden_states''': encoder_hidden_states,
'''encoder_attention_mask''': encoder_attention_mask,
}
_UpperCamelCase = model(_A )
_UpperCamelCase = [input_ids, input_mask]
_UpperCamelCase = model(_A , encoder_hidden_states=_A )
# Also check the case where encoder outputs are not passed
_UpperCamelCase = model(_A , attention_mask=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : int , _A : List[Any] , _A : str , _A : Optional[Any] , _A : Optional[int] , _A : Optional[Any] , _A : Tuple ):
_UpperCamelCase = TFEsmForMaskedLM(config=_A )
_UpperCamelCase = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : List[Any] , _A : List[str] , _A : List[str] , _A : Any , _A : Dict , _A : str , _A : Any ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFEsmForTokenClassification(config=_A )
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": TFEsmModel,
"fill-mask": TFEsmForMaskedLM,
"text-classification": TFEsmForSequenceClassification,
"token-classification": TFEsmForTokenClassification,
"zero-shot": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = TFEsmModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCamelCase_ ( self : List[Any] ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_A )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def UpperCamelCase_ ( self : List[Any] ):
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFEsmModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@unittest.skip('''Protein models do not support embedding resizing.''' )
def UpperCamelCase_ ( self : List[Any] ):
pass
@unittest.skip('''Protein models do not support embedding resizing.''' )
def UpperCamelCase_ ( self : int ):
pass
def UpperCamelCase_ ( self : str ):
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(_A )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
_UpperCamelCase = model.get_bias()
assert isinstance(_A , _A )
for k, v in name.items():
assert isinstance(_A , tf.Variable )
else:
_UpperCamelCase = model.get_output_embeddings()
assert x is None
_UpperCamelCase = model.get_bias()
assert name is None
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
_UpperCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCamelCase = model(_A )[0]
_UpperCamelCase = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , _A )
# compare the actual values for a slice.
_UpperCamelCase = tf.constant(
[
[
[8.92_1518, -10.58_9814, -6.467_1307],
[-6.396_7156, -13.91_1377, -1.121_1915],
[-7.78_1247, -13.95_1557, -3.74_0592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
_UpperCamelCase = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
_UpperCamelCase = model(_A )[0]
# compare the actual values for a slice.
_UpperCamelCase = tf.constant(
[
[
[0.1444_3092, 0.5412_5327, 0.324_7739],
[0.3034_0484, 0.0052_6676, 0.3107_7722],
[0.3227_8043, -0.2498_7096, 0.341_4628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 10 | from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    def decorator(func):
        handle = getattr(func, '''handle_key''', [])
        handle += [key]
        setattr(func, '''handle_key''', handle)
        return func

    return decorator


def mark_multiple(*keys):
    def decorator(func):
        handle = getattr(func, '''handle_key''', [])
        handle += keys
        setattr(func, '''handle_key''', handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, '''key_handler'''):
            setattr(new_cls, '''key_handler''', {})
        setattr(new_cls, '''handle_input''', KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, '''handle_key''', [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
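# Usage sketch (hypothetical menu class wired through the pieces above):
#   class Menu:
#       @mark("up")
#       def move_up(cls):
#           ...
#   Menu = register(Menu)
#   Menu.handle_input()  # reads one key and dispatches to the decorated handler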
| 10 | 1 |
def _snake_case ( __snake_case , __snake_case ):
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(__snake_case , int(b / 2 ) ) * actual_power(__snake_case , int(b / 2 ) )
else:
return a * actual_power(__snake_case , int(b / 2 ) ) * actual_power(__snake_case , int(b / 2 ) )
def _snake_case ( __snake_case , __snake_case ):
if b < 0:
return 1 / actual_power(__snake_case , __snake_case )
return actual_power(__snake_case , __snake_case )
if __name__ == "__main__":
print(power(-2, -3))
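# --- Illustrative sketch (added; not part of the original file) ---
# The recursion above halves the exponent, but because both halves are
# recomputed it performs O(b) multiplications; computing the half once and
# squaring it restores the O(log b) divide-and-conquer bound. The name
# `_fast_power` is hypothetical, used only for this self-contained check:
def _fast_power(base: float, exp: int) -> float:
    if exp < 0:
        return 1 / _fast_power(base, -exp)
    if exp == 0:
        return 1
    half = _fast_power(base, exp // 2)  # reuse the half instead of recursing twice
    return half * half if exp % 2 == 0 else base * half * half

assert _fast_power(2, 10) == 1024
assert _fast_power(-2, -3) == -0.125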
| 10 | import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class lowerCAmelCase_ ( unittest.TestCase ):
UpperCAmelCase = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
_UpperCamelCase = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
_A , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A , num_return_sequences=2 , return_tensors=_A )
self.assertEqual(
_A , [
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
] , )
_UpperCamelCase = text_generator.model.config.eos_token_id
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=_A , num_return_sequences=2 , batch_size=2 , return_tensors=_A , )
self.assertEqual(
_A , [
[
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
],
[
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
],
] , )
@require_tf
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
_UpperCamelCase = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=_A )
self.assertEqual(
_A , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def UpperCamelCase_ ( self : int , _A : str , _A : Union[str, Any] , _A : Any ):
_UpperCamelCase = TextGenerationPipeline(model=_A , tokenizer=_A )
return text_generator, ["This is a test", "Another test"]
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = '''Hello I believe in'''
_UpperCamelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
_UpperCamelCase = text_generator(_A )
self.assertEqual(
_A , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
_UpperCamelCase = text_generator(_A , stop_sequence=''' fe''' )
self.assertEqual(_A , [{'''generated_text''': '''Hello I believe in fe'''}] )
def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : Union[str, Any] ):
_UpperCamelCase = text_generator.model
_UpperCamelCase = text_generator.tokenizer
_UpperCamelCase = text_generator('''This is a test''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_UpperCamelCase = text_generator('''This is a test''' , return_full_text=_A )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_UpperCamelCase = pipeline(task='''text-generation''' , model=_A , tokenizer=_A , return_full_text=_A )
_UpperCamelCase = text_generator('''This is a test''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_UpperCamelCase = text_generator('''This is a test''' , return_full_text=_A )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_UpperCamelCase = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=_A )
self.assertEqual(
_A , [
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
] , )
if text_generator.tokenizer.pad_token is not None:
_UpperCamelCase = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=_A )
self.assertEqual(
_A , [
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
] , )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_full_text=_A , return_text=_A )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_full_text=_A , return_tensors=_A )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_text=_A , return_tensors=_A )
            # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
_UpperCamelCase = text_generator('''''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
_UpperCamelCase = text_generator('''''' )
if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, so these tests are skipped for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
_UpperCamelCase = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
_UpperCamelCase = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(_A ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
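            # Note (added): the "hole" strategy truncates the prompt on the left
            # so that prompt length + max_new_tokens fits within model_max_length;
            # it cannot succeed when max_new_tokens alone already exceeds the
            # model's context, which is exactly what the assertRaises above checks.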
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase_ ( self : Optional[int] ):
import torch
# Classic `model_kwargs`
_UpperCamelCase = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
        # Upgraded those two to real pipeline arguments (they just get sent to the model as they're unlikely to mean anything else.)
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def UpperCamelCase_ ( self : Union[str, Any] ):
import torch
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase_ ( self : Optional[int] ):
import torch
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=_A , top_p=0.5 )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = '''Hello world'''
_UpperCamelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
_UpperCamelCase = logging.get_logger('''transformers.generation.tf_utils''' )
else:
_UpperCamelCase = logging.get_logger('''transformers.generation.utils''' )
        _UpperCamelCase = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_length=10 , max_new_tokens=1 )
self.assertIn(_A , cl.out )
# The user only sets one -> no warning
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_new_tokens=1 )
self.assertNotIn(_A , cl.out )
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_length=10 )
self.assertNotIn(_A , cl.out )
| 10 | 1 |
def _snake_case ( __snake_case , __snake_case ):
assert x is not None
assert y is not None
_UpperCamelCase = len(__snake_case )
_UpperCamelCase = len(__snake_case )
# declaring the array for storing the dp values
_UpperCamelCase = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
_UpperCamelCase = 1 if x[i - 1] == y[j - 1] else 0
_UpperCamelCase = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
_UpperCamelCase = ''''''
_UpperCamelCase , _UpperCamelCase = m, n
while i > 0 and j > 0:
_UpperCamelCase = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
_UpperCamelCase = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
_lowerCAmelCase = "AGGTAB"
_lowerCAmelCase = "GXTXAYB"
_lowerCAmelCase = 4
_lowerCAmelCase = "GTAB"
_lowerCAmelCase, _lowerCAmelCase = longest_common_subsequence(a, b)
print("len =", ln, ", sub-sequence =", subseq)
import doctest
doctest.testmod()
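# --- Illustrative sketch (added; not part of the original file) ---
# Independent cross-check of the DP length computed above, using the classic
# recurrence with a rolling row. `_lcs_length` is a hypothetical helper name.
def _lcs_length(x: str, y: str) -> int:
    prev = [0] * (len(y) + 1)
    for xc in x:
        curr = [0]
        for j, yc in enumerate(y, start=1):
            curr.append(prev[j - 1] + 1 if xc == yc else max(prev[j], curr[j - 1]))
        prev = curr
    return prev[-1]

assert _lcs_length("AGGTAB", "GXTXAYB") == 4  # matches the expected length above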
| 10 | def _snake_case ( __snake_case = 100 ):
_UpperCamelCase = (n * (n + 1) // 2) ** 2
_UpperCamelCase = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(f'{solution() = }')
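# --- Illustrative sketch (added; not part of the original file) ---
# The closed forms used above are sum(1..n) = n(n+1)/2 and
# sum(1..n of i^2) = n(n+1)(2n+1)/6, so the answer is the square of the first
# minus the second. A brute-force cross-check (hypothetical helper name):
def _solution_bruteforce(n: int = 100) -> int:
    return sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))

assert _solution_bruteforce(10) == 2640  # the Project Euler 6 example value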
| 10 | 1 |
import math
class lowerCAmelCase_ :
def __init__( self : Tuple , _A : int=0 ): # a graph with Node 0,1,...,N-1
_UpperCamelCase = n
_UpperCamelCase = [
[math.inf for j in range(0 , _A )] for i in range(0 , _A )
] # adjacency matrix for weight
_UpperCamelCase = [
[math.inf for j in range(0 , _A )] for i in range(0 , _A )
] # dp[i][j] stores minimum distance from i to j
def UpperCamelCase_ ( self : Dict , _A : str , _A : List[str] , _A : Optional[Any] ):
_UpperCamelCase = w
def UpperCamelCase_ ( self : Optional[int] ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
_UpperCamelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCamelCase_ ( self : List[str] , _A : Optional[int] , _A : Optional[int] ):
return self.dp[u][v]
if __name__ == "__main__":
_lowerCAmelCase = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
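# --- Illustrative sketch (added; not part of the original file) ---
# The triple loop above is the standard Floyd-Warshall relaxation,
# dp[i][j] = min(dp[i][j], dp[i][k] + dp[k][j]) over every intermediate k,
# giving all-pairs shortest paths in O(n^3). A minimal self-contained run:
def _floyd_warshall(dist):
    n = len(dist)
    for k in range(n):
        for i in range(n):
            for j in range(n):
                dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
    return dist

_demo = [[0, 3, math.inf], [math.inf, 0, 1], [2, math.inf, 0]]
assert _floyd_warshall(_demo)[0][2] == 4  # 0 -> 1 -> 2 costs 3 + 1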
| 10 | import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_lowerCAmelCase = logging.get_logger(__name__)
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ):
def constraint_to_multiple_of(__snake_case , __snake_case , __snake_case=0 , __snake_case=None ):
_UpperCamelCase = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_UpperCamelCase = math.floor(val / multiple ) * multiple
if x < min_val:
_UpperCamelCase = math.ceil(val / multiple ) * multiple
return x
_UpperCamelCase = (output_size, output_size) if isinstance(__snake_case , __snake_case ) else output_size
_UpperCamelCase , _UpperCamelCase = get_image_size(__snake_case )
_UpperCamelCase , _UpperCamelCase = output_size
# determine new height and width
_UpperCamelCase = output_height / input_height
_UpperCamelCase = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_UpperCamelCase = scale_width
else:
# fit height
_UpperCamelCase = scale_height
_UpperCamelCase = constraint_to_multiple_of(scale_height * input_height , multiple=__snake_case )
_UpperCamelCase = constraint_to_multiple_of(scale_width * input_width , multiple=__snake_case )
return (new_height, new_width)
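# --- Illustrative worked example (added; not part of the original file) ---
# With keep_aspect_ratio=True, the side whose scale is closer to 1 wins: for an
# input of (height, width) = (480, 640), a target of (384, 384) and multiple 32,
# scale_height = 0.8 and scale_width = 0.6, so both sides scale by 0.8 and are
# rounded to multiples of 32, giving (384, 512). A check of the rounding rule
# (`_to_multiple_of` is a hypothetical stand-in for the inner helper):
def _to_multiple_of(val: float, multiple: int) -> int:
    return round(val / multiple) * multiple

assert _to_multiple_of(0.8 * 480, 32) == 384
assert _to_multiple_of(0.8 * 640, 32) == 512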
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = ["pixel_values"]
def __init__( self : List[Any] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = False , _A : int = 1 , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : List[str] , ):
super().__init__(**_A )
_UpperCamelCase = size if size is not None else {'''height''': 384, '''width''': 384}
_UpperCamelCase = get_size_dict(_A )
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = keep_aspect_ratio
_UpperCamelCase = ensure_multiple_of
_UpperCamelCase = resample
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase_ ( self : List[str] , _A : np.ndarray , _A : Dict[str, int] , _A : bool = False , _A : int = 1 , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
_UpperCamelCase = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_UpperCamelCase = get_resize_output_image_size(
_A , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_A , multiple=_A , )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : str , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
return rescale(_A , scale=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : int , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : Optional[int] , _A : ImageInput , _A : bool = None , _A : int = None , _A : bool = None , _A : int = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ):
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(_A )
_UpperCamelCase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_UpperCamelCase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(_A ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(_A , _A ) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
def UpperCamelCase_ ( self : Any , _A : Any , _A : List[Tuple] = None ):
_UpperCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_A ) != len(_A ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_A ):
_UpperCamelCase = target_sizes.numpy()
_UpperCamelCase = []
for idx in range(len(_A ) ):
_UpperCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_A )
_UpperCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_A )
else:
_UpperCamelCase = logits.argmax(dim=1 )
_UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
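# --- Illustrative sketch (added; not part of the original file) ---
# The post-processing above upsamples each logit map to its target size and
# takes a per-pixel argmax over the class dimension. Minimal equivalent for a
# single map (the shape names H, W and num_classes are hypothetical):
#
#   resized = torch.nn.functional.interpolate(
#       logits_for_one_image.unsqueeze(0),          # (1, num_classes, h, w)
#       size=(H, W), mode="bilinear", align_corners=False,
#   )
#   seg_map = resized[0].argmax(dim=0)              # (H, W) tensor of class ids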
| 10 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class lowerCAmelCase_ :
def __init__( self : str , _A : Tuple , _A : Tuple=13 , _A : Union[str, Any]=7 , _A : Tuple=True , _A : Dict=True , _A : Optional[int]=True , _A : Any=99 , _A : str=32 , _A : List[str]=5 , _A : List[Any]=4 , _A : Tuple=37 , _A : Dict="gelu" , _A : int=0.1 , _A : List[Any]=0.1 , _A : Dict=512 , _A : int=16 , _A : List[Any]=2 , _A : Union[str, Any]=0.02 , _A : int=3 , _A : Union[str, Any]=4 , _A : List[Any]=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
_UpperCamelCase = self.vocab_size - 1
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_UpperCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCamelCase_ ( self : Tuple , _A : Union[str, Any] , _A : Union[str, Any] , _A : Union[str, Any] , _A : Union[str, Any] , *_A : List[str] ):
_UpperCamelCase = OpenAIGPTModel(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , token_type_ids=_A , head_mask=_A )
_UpperCamelCase = model(_A , token_type_ids=_A )
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Optional[int] , _A : List[Any] , _A : int , _A : str , _A : int , *_A : Optional[Any] ):
_UpperCamelCase = OpenAIGPTLMHeadModel(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Union[str, Any] , _A : Tuple , _A : str , _A : Optional[int] , _A : List[str] , *_A : List[str] ):
_UpperCamelCase = OpenAIGPTDoubleHeadsModel(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Tuple , _A : Optional[int] , _A : Union[str, Any] , _A : List[str] , _A : Optional[Any] , *_A : List[Any] ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = OpenAIGPTForSequenceClassification(_A )
model.to(_A )
model.eval()
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.prepare_config_and_inputs()
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __lowercase, __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
UpperCAmelCase = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
UpperCAmelCase = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase_ ( self : Optional[Any] , _A : Tuple , _A : Optional[int] , _A : List[Any] , _A : Optional[Any] , _A : Optional[int] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def UpperCamelCase_ ( self : Dict , _A : int , _A : Any , _A : Dict=False ):
_UpperCamelCase = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_UpperCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=_A , )
_UpperCamelCase = inputs_dict['''labels''']
_UpperCamelCase = inputs_dict['''labels''']
_UpperCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=_A , )
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = OpenAIGPTModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , n_embd=37 )
def UpperCamelCase_ ( self : Dict ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_A )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_A )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_A )
@slow
def UpperCamelCase_ ( self : List[str] ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = OpenAIGPTModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(_A )
_UpperCamelCase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=_A ) # the president is
_UpperCamelCase = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_UpperCamelCase = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].tolist() , _A )
| 10 | import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCAmelCase = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
_UpperCamelCase = self.diffusers_dir
shutil.copy(
os.path.join(_A , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def UpperCamelCase_ ( self : str , _A : List[str] , _A : Optional[Any] , _A : List[str] , _A : Optional[int]=None ):
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_UpperCamelCase = black.format_str(_A , mode=_A )
_UpperCamelCase = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(_A , '''w''' , newline='''\n''' ) as f:
f.write(_A )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_A ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_A )
with open(_A , '''r''' ) as f:
self.assertTrue(f.read() , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , _A , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , _A ) , )
# Copy consistency with a really long name
_UpperCamelCase = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , _A , _A ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , _A , overwrite_result=re.sub('''DDPM''' , '''Test''' , _A ) , )
| 10 | 1 |
import logging
import os
from .state import PartialState
class lowerCAmelCase_ ( logging.LoggerAdapter ):
@staticmethod
def UpperCamelCase_ ( _A : Any ):
_UpperCamelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[Any] , _A : str , *_A : int , **_A : List[Any] ):
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
_UpperCamelCase = kwargs.pop('''main_process_only''' , _A )
_UpperCamelCase = kwargs.pop('''in_order''' , _A )
if self.isEnabledFor(_A ):
if self._should_log(_A ):
_UpperCamelCase , _UpperCamelCase = self.process(_A , _A )
self.logger.log(_A , _A , *_A , **_A )
elif in_order:
_UpperCamelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_UpperCamelCase , _UpperCamelCase = self.process(_A , _A )
self.logger.log(_A , _A , *_A , **_A )
state.wait_for_everyone()
def _snake_case ( __snake_case , __snake_case = None ):
if log_level is None:
_UpperCamelCase = os.environ.get('''ACCELERATE_LOG_LEVEL''' , __snake_case )
_UpperCamelCase = logging.getLogger(__snake_case )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(__snake_case , {} )
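# --- Illustrative usage sketch (added; not part of the original file) ---
# Typical use of the factory above (published in Accelerate as `get_logger`;
# the obfuscated `_snake_case` definition above is the same function):
#
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("logged once, on the main process only")
#   logger.info("logged on every process", main_process_only=False)
#   logger.info("logged by each process, in rank order", in_order=True)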
| 10 | import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_lowerCAmelCase = True
from torch.cuda.amp import autocast
_lowerCAmelCase = logging.getLogger(__name__)
def _snake_case ( __snake_case=None , __snake_case=None ):
return field(default_factory=lambda: default , metadata=__snake_case )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."} )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
UpperCAmelCase = field(
default=0.1, metadata={
"help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
}, )
UpperCAmelCase = field(
        default=0.1, metadata={"help": "The dropout probability for all 1D convolutional layers in the feature extractor."}, )
UpperCAmelCase = field(
default=0.0_5, metadata={
"help": (
"Propability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
)
}, )
UpperCAmelCase = field(default=0.0, metadata={"help": "The LayerDrop probability."} )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
UpperCAmelCase = field(
default="train+validation", metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "The number of processes to use for the preprocessing."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
}, )
UpperCAmelCase = list_field(
default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"], metadata={"help": "A list of characters to remove from the transcripts."}, )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = 42
UpperCAmelCase = True
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
def __call__( self : Union[str, Any] , _A : List[Dict[str, Union[List[int], torch.Tensor]]] ):
        # split inputs and labels since they have to be of different lengths and need
# different padding methods
_UpperCamelCase = [{'''input_values''': feature['''input_values''']} for feature in features]
_UpperCamelCase = [{'''input_ids''': feature['''labels''']} for feature in features]
_UpperCamelCase = self.processor.pad(
_A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
_UpperCamelCase = self.processor.pad(
labels=_A , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , )
        # replace padding with -100 so that padded positions are ignored by the loss
_UpperCamelCase = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
_UpperCamelCase = labels
return batch
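# --- Illustrative sketch (added; not part of the original file) ---
# The masked_fill above swaps label padding for -100, the conventional ignore
# index, so padded positions are excluded from the CTC loss. A minimal
# demonstration with plain tensors (torch is imported at the top of this file):
_demo_labels = torch.tensor([[5, 9, 0], [7, 0, 0]])
_demo_mask = torch.tensor([[1, 1, 0], [1, 0, 0]])
assert _demo_labels.masked_fill(_demo_mask.ne(1), -100).tolist() == [
    [5, 9, -100],
    [7, -100, -100],
]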
class lowerCAmelCase_ ( __lowercase ):
def UpperCamelCase_ ( self : Dict , _A : nn.Module , _A : Dict[str, Union[torch.Tensor, Any]] ):
model.train()
_UpperCamelCase = self._prepare_inputs(_A )
if self.use_amp:
with autocast():
_UpperCamelCase = self.compute_loss(_A , _A )
else:
_UpperCamelCase = self.compute_loss(_A , _A )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
_UpperCamelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_UpperCamelCase = loss.sum() / (inputs['''labels'''] >= 0).sum()
else:
raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
_UpperCamelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(_A ).backward()
elif self.use_apex:
with amp.scale_loss(_A , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(_A )
else:
loss.backward()
return loss.detach()
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
_UpperCamelCase = datasets.load_dataset(
'''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
_UpperCamelCase = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
# Create and save tokenizer
_UpperCamelCase = f"""[{"".join(data_args.chars_to_ignore )}]"""
def remove_special_characters(__snake_case ):
_UpperCamelCase = re.sub(__snake_case , '''''' , batch['''sentence'''] ).lower() + ''' '''
return batch
_UpperCamelCase = train_dataset.map(__snake_case , remove_columns=['''sentence'''] )
_UpperCamelCase = eval_dataset.map(__snake_case , remove_columns=['''sentence'''] )
def extract_all_chars(__snake_case ):
_UpperCamelCase = ''' '''.join(batch['''text'''] )
_UpperCamelCase = list(set(__snake_case ) )
return {"vocab": [vocab], "all_text": [all_text]}
_UpperCamelCase = train_dataset.map(
__snake_case , batched=__snake_case , batch_size=-1 , keep_in_memory=__snake_case , remove_columns=train_dataset.column_names , )
_UpperCamelCase = train_dataset.map(
__snake_case , batched=__snake_case , batch_size=-1 , keep_in_memory=__snake_case , remove_columns=eval_dataset.column_names , )
_UpperCamelCase = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
_UpperCamelCase = {v: k for k, v in enumerate(__snake_case )}
_UpperCamelCase = vocab_dict[''' ''']
del vocab_dict[" "]
_UpperCamelCase = len(__snake_case )
_UpperCamelCase = len(__snake_case )
with open('''vocab.json''' , '''w''' ) as vocab_file:
json.dump(__snake_case , __snake_case )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = WavaVecaCTCTokenizer(
'''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
_UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0.0 , do_normalize=__snake_case , return_attention_mask=__snake_case )
_UpperCamelCase = WavaVecaProcessor(feature_extractor=__snake_case , tokenizer=__snake_case )
_UpperCamelCase = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
_UpperCamelCase = min(len(__snake_case ) , data_args.max_train_samples )
_UpperCamelCase = train_dataset.select(range(__snake_case ) )
if data_args.max_val_samples is not None:
_UpperCamelCase = eval_dataset.select(range(data_args.max_val_samples ) )
_UpperCamelCase = torchaudio.transforms.Resample(48000 , 16000 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(__snake_case ):
_UpperCamelCase , _UpperCamelCase = torchaudio.load(batch['''path'''] )
_UpperCamelCase = resampler(__snake_case ).squeeze().numpy()
_UpperCamelCase = 16000
_UpperCamelCase = batch['''text''']
return batch
_UpperCamelCase = train_dataset.map(
__snake_case , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
__snake_case , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(__snake_case ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
_UpperCamelCase = processor(
audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
batch.update(__snake_case )
return batch
_UpperCamelCase = train_dataset.map(
__snake_case , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
__snake_case , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , )
# Metric
_UpperCamelCase = datasets.load_metric('''wer''' )
def compute_metrics(__snake_case ):
_UpperCamelCase = pred.predictions
_UpperCamelCase = np.argmax(__snake_case , axis=-1 )
_UpperCamelCase = processor.tokenizer.pad_token_id
_UpperCamelCase = processor.batch_decode(__snake_case )
# we do not want to group tokens when computing the metrics
_UpperCamelCase = processor.batch_decode(pred.label_ids , group_tokens=__snake_case )
_UpperCamelCase = wer_metric.compute(predictions=__snake_case , references=__snake_case )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
_UpperCamelCase = DataCollatorCTCWithPadding(processor=__snake_case , padding=__snake_case )
# Initialize our Trainer
_UpperCamelCase = CTCTrainer(
model=__snake_case , data_collator=__snake_case , args=__snake_case , compute_metrics=__snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCamelCase = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
_UpperCamelCase = model_args.model_name_or_path
else:
_UpperCamelCase = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
_UpperCamelCase = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model()
_UpperCamelCase = train_result.metrics
_UpperCamelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__snake_case )
)
_UpperCamelCase = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''train''' , __snake_case )
trainer.save_metrics('''train''' , __snake_case )
trainer.save_state()
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(__snake_case )
_UpperCamelCase = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''eval''' , __snake_case )
trainer.save_metrics('''eval''' , __snake_case )
return results
if __name__ == "__main__":
main()
| 10 | 1 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
_lowerCAmelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : Any , **_A : Optional[int] ):
requires_backends(self , ['''bs4'''] )
super().__init__(**_A )
def UpperCamelCase_ ( self : Dict , _A : List[str] ):
_UpperCamelCase = []
_UpperCamelCase = []
_UpperCamelCase = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
_UpperCamelCase = parent.find_all(child.name , recursive=_A )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(_A ) else next(i for i, s in enumerate(_A , 1 ) if s is child ) )
_UpperCamelCase = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def UpperCamelCase_ ( self : List[Any] , _A : int ):
_UpperCamelCase = BeautifulSoup(_A , '''html.parser''' )
_UpperCamelCase = []
_UpperCamelCase = []
_UpperCamelCase = []
for element in html_code.descendants:
if type(_A ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
_UpperCamelCase = html.unescape(_A ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(_A )
_UpperCamelCase , _UpperCamelCase = self.xpath_soup(_A )
stringaxtag_seq.append(_A )
stringaxsubs_seq.append(_A )
if len(_A ) != len(_A ):
raise ValueError('''Number of doc strings and xtags does not correspond''' )
if len(_A ) != len(_A ):
raise ValueError('''Number of doc strings and xsubs does not correspond''' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def UpperCamelCase_ ( self : int , _A : int , _A : int ):
_UpperCamelCase = ''''''
for tagname, subs in zip(_A , _A ):
xpath += F"""/{tagname}"""
if subs != 0:
xpath += F"""[{subs}]"""
return xpath
def __call__( self : str , _A : Optional[Any] ):
_UpperCamelCase = False
# Check that strings has a valid type
if isinstance(_A , _A ):
_UpperCamelCase = True
elif isinstance(_A , (list, tuple) ):
if len(_A ) == 0 or isinstance(html_strings[0] , _A ):
_UpperCamelCase = True
if not valid_strings:
raise ValueError(
'''HTML strings must of type `str`, `List[str]` (batch of examples), '''
F"""but is of type {type(_A )}.""" )
_UpperCamelCase = bool(isinstance(_A , (list, tuple) ) and (isinstance(html_strings[0] , _A )) )
if not is_batched:
_UpperCamelCase = [html_strings]
# Get nodes + xpaths
_UpperCamelCase = []
_UpperCamelCase = []
for html_string in html_strings:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.get_three_from_single(_A )
nodes.append(_A )
_UpperCamelCase = []
for node, tag_list, sub_list in zip(_A , _A , _A ):
_UpperCamelCase = self.construct_xpath(_A , _A )
xpath_strings.append(_A )
xpaths.append(_A )
# return as Dict
_UpperCamelCase = {'''nodes''': nodes, '''xpaths''': xpaths}
_UpperCamelCase = BatchFeature(data=_A , tensor_type=_A )
return encoded_inputs
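# --- Illustrative sketch (added; not part of the original file) ---
# construct_xpath above turns tag/subscript pairs into an XPath string; a
# subscript of 0 means the tag is unique among its siblings and gets no index.
# A self-contained rendition of the rule (`_build_xpath` is a hypothetical name):
def _build_xpath(tags, subs):
    return "".join(f"/{t}" + (f"[{s}]" if s != 0 else "") for t, s in zip(tags, subs))

assert _build_xpath(["html", "body", "div", "li"], [0, 0, 0, 2]) == "/html/body/div/li[2]"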
| 10 | import math
class lowerCAmelCase_ :
def __init__( self : Tuple , _A : int=0 ): # a graph with Node 0,1,...,N-1
_UpperCamelCase = n
_UpperCamelCase = [
[math.inf for j in range(0 , _A )] for i in range(0 , _A )
] # adjacency matrix for weight
_UpperCamelCase = [
[math.inf for j in range(0 , _A )] for i in range(0 , _A )
] # dp[i][j] stores minimum distance from i to j
def UpperCamelCase_ ( self : Dict , _A : str , _A : List[str] , _A : Optional[Any] ):
_UpperCamelCase = w
def UpperCamelCase_ ( self : Optional[int] ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
_UpperCamelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCamelCase_ ( self : List[str] , _A : Optional[int] , _A : Optional[int] ):
return self.dp[u][v]
if __name__ == "__main__":
_lowerCAmelCase = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 10 | 1 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def _snake_case ( ):
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--model_ckpt''' , type=__snake_case , default='''microsoft/unixcoder-base-nine''' )
parser.add_argument('''--num_epochs''' , type=__snake_case , default=5 )
parser.add_argument('''--batch_size''' , type=__snake_case , default=6 )
parser.add_argument('''--gradient_accumulation_steps''' , type=__snake_case , default=1 )
parser.add_argument('''--freeze''' , type=__snake_case , default=__snake_case )
parser.add_argument('''--learning_rate''' , type=__snake_case , default=5E-4 )
parser.add_argument('''--seed''' , type=__snake_case , default=0 )
parser.add_argument('''--lr_scheduler_type''' , type=__snake_case , default='''cosine''' )
parser.add_argument('''--num_warmup_steps''' , type=__snake_case , default=10 )
parser.add_argument('''--weight_decay''' , type=__snake_case , default=0.01 )
parser.add_argument('''--output_dir''' , type=__snake_case , default='''./results''' )
return parser.parse_args()
_lowerCAmelCase = load("accuracy")
def _snake_case ( __snake_case ):
_UpperCamelCase , _UpperCamelCase = eval_pred
_UpperCamelCase = np.argmax(__snake_case , axis=1 )
return metric.compute(predictions=__snake_case , references=__snake_case )
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : Optional[int] , _A : Any ):
super().__init__()
_UpperCamelCase = trainer
def UpperCamelCase_ ( self : int , _A : List[str] , _A : List[str] , _A : Union[str, Any] , **_A : Tuple ):
if control.should_evaluate:
_UpperCamelCase = deepcopy(_A )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' )
return control_copy
def _snake_case ( ):
_UpperCamelCase = get_args()
set_seed(args.seed )
_UpperCamelCase = load_dataset('''codeparrot/codecomplex''' , split='''train''' )
_UpperCamelCase = dataset.train_test_split(test_size=0.2 )
_UpperCamelCase = train_test['''test'''].train_test_split(test_size=0.5 )
_UpperCamelCase = DatasetDict(
{
'''train''': train_test['''train'''],
'''test''': test_validation['''train'''],
'''valid''': test_validation['''test'''],
} )
print('''Loading tokenizer and model''' )
_UpperCamelCase = AutoTokenizer.from_pretrained(args.model_ckpt )
_UpperCamelCase = tokenizer.eos_token
_UpperCamelCase = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
_UpperCamelCase = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
_UpperCamelCase = False
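    # Note (added): the obfuscated assignment above corresponds to
    # `param.requires_grad = False`, freezing the encoder so that only the
    # classification head is updated during fine-tuning.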
_UpperCamelCase = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) )
def tokenize(__snake_case ):
_UpperCamelCase = tokenizer(example['''src'''] , truncation=__snake_case , max_length=1024 )
_UpperCamelCase = labels.straint(example['''complexity'''] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
_UpperCamelCase = train_test_validation.map(
__snake_case , batched=__snake_case , remove_columns=train_test_validation['''train'''].column_names , )
_UpperCamelCase = DataCollatorWithPadding(tokenizer=__snake_case )
_UpperCamelCase = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , )
_UpperCamelCase = Trainer(
model=__snake_case , args=__snake_case , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=__snake_case , data_collator=__snake_case , compute_metrics=__snake_case , )
print('''Training...''' )
trainer.add_callback(CustomCallback(__snake_case ) )
trainer.train()
if __name__ == "__main__":
main()
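
Aside: the callback class in the script above exists only to report metrics on the training split whenever the Trainer evaluates. A minimal readable sketch of the same callback follows; the class name is ours, but TrainerCallback, on_epoch_end, add_callback, and evaluate(metric_key_prefix=...) are the standard transformers API.

from copy import deepcopy

from transformers import TrainerCallback


class TrainSetEvalCallback(TrainerCallback):
    """Re-run evaluation on the training split whenever evaluation fires."""

    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            # evaluate() mutates `control`, so snapshot it first and
            # return the snapshot to restore the Trainer's flags.
            control_copy = deepcopy(control)
            self._trainer.evaluate(
                eval_dataset=self._trainer.train_dataset, metric_key_prefix="train"
            )
            return control_copy

# usage, mirroring the script above: trainer.add_callback(TrainSetEvalCallback(trainer))
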
| 10 | import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
def _snake_case ( __snake_case=None , __snake_case=None ):
return field(default_factory=lambda: default , metadata=__snake_case )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = list_field(
default=[], metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
}, )
UpperCAmelCase = list_field(
default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
UpperCAmelCase = list_field(
default=[8, 32, 128, 512], metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Use FP16 to accelerate inference."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Benchmark training of model"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Verbose memory tracing"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
}, )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Trace memory line by line"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Save result to a CSV file"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Save all print statements in a log file"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to print environment information"} )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
}, )
UpperCAmelCase = field(
default=F"""inference_time_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving time results to csv."}, )
UpperCAmelCase = field(
default=F"""inference_memory_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving memory results to csv."}, )
UpperCAmelCase = field(
default=F"""train_time_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving time results to csv for training."}, )
UpperCAmelCase = field(
default=F"""train_memory_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving memory results to csv for training."}, )
UpperCAmelCase = field(
default=F"""env_info_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving environment information."}, )
UpperCAmelCase = field(
default=F"""log_{round(time() )}.csv""", metadata={"help": "Log filename used if print statements are saved in log."}, )
UpperCAmelCase = field(default=3, metadata={"help": "Times an experiment will be run."} )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
}, )
def UpperCamelCase_ ( self : Union[str, Any] ):
warnings.warn(
F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
''' to benchmark Transformer models.''' , _A , )
def UpperCamelCase_ ( self : str ):
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def UpperCamelCase_ ( self : List[Any] ):
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def UpperCamelCase_ ( self : Optional[int] ):
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
| 10 | 1 |
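
The benchmark-arguments dataclass above builds every list default through a small list_field helper, because dataclasses rejects mutable defaults outright. A self-contained sketch of the pattern follows; note that the snippet's own lambda returns the same list object on each call, so the copy.copy here is an extra safeguard of ours, not behavior taken from the snippet.

import copy
from dataclasses import dataclass, field
from typing import List, Optional


def list_field(default: Optional[list] = None, metadata: Optional[dict] = None):
    # dataclasses forbids `x: List[int] = [8]`; a default_factory defers
    # creation to instantiation time, and copy.copy keeps instances from
    # sharing one underlying list.
    return field(default_factory=lambda: copy.copy(default), metadata=metadata)


@dataclass
class BenchmarkArgs:
    batch_sizes: List[int] = list_field(default=[8], metadata={"help": "batch sizes"})
    sequence_lengths: List[int] = list_field(default=[8, 32, 128, 512])


a, b = BenchmarkArgs(), BenchmarkArgs()
a.batch_sizes.append(16)
print(a.batch_sizes, b.batch_sizes)  # [8, 16] [8] -- defaults are not shared
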
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def _snake_case ( __snake_case ):
_UpperCamelCase = np.inf
def set_batch_size(__snake_case ) -> None:
nonlocal batch_size
if isinstance(__snake_case , __snake_case ):
_UpperCamelCase = min(__snake_case , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(__snake_case , __snake_case ):
_UpperCamelCase = min(__snake_case , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(__snake_case , __snake_case ) and feature.dtype == "binary":
_UpperCamelCase = min(__snake_case , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(__snake_case , __snake_case )
return None if batch_size is np.inf else batch_size
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : List[str] , _A : NestedDataStructureLike[PathLike] , _A : Optional[NamedSplit] = None , _A : Optional[Features] = None , _A : str = None , _A : bool = False , _A : bool = False , _A : Optional[int] = None , **_A : Dict , ):
super().__init__(
_A , split=_A , features=_A , cache_dir=_A , keep_in_memory=_A , streaming=_A , num_proc=_A , **_A , )
_UpperCamelCase = path_or_paths if isinstance(_A , _A ) else {self.split: path_or_paths}
_UpperCamelCase = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
_UpperCamelCase = Parquet(
cache_dir=_A , data_files=_A , features=_A , hash=_A , **_A , )
def UpperCamelCase_ ( self : List[str] ):
# Build iterable dataset
if self.streaming:
_UpperCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
self.builder.download_and_prepare(
download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , num_proc=self.num_proc , )
_UpperCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=_A , in_memory=self.keep_in_memory )
return dataset
class lowerCAmelCase_ :
def __init__( self : Optional[Any] , _A : Dataset , _A : Union[PathLike, BinaryIO] , _A : Optional[int] = None , **_A : List[str] , ):
_UpperCamelCase = dataset
_UpperCamelCase = path_or_buf
_UpperCamelCase = batch_size or get_writer_batch_size(dataset.features )
_UpperCamelCase = parquet_writer_kwargs
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , '''wb+''' ) as buffer:
_UpperCamelCase = self._write(file_obj=_A , batch_size=_A , **self.parquet_writer_kwargs )
else:
_UpperCamelCase = self._write(file_obj=self.path_or_buf , batch_size=_A , **self.parquet_writer_kwargs )
return written
def UpperCamelCase_ ( self : Tuple , _A : BinaryIO , _A : int , **_A : Union[str, Any] ):
_UpperCamelCase = 0
_UpperCamelCase = parquet_writer_kwargs.pop('''path_or_buf''' , _A )
_UpperCamelCase = self.dataset.features.arrow_schema
_UpperCamelCase = pq.ParquetWriter(_A , schema=_A , **_A )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , _A ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
_UpperCamelCase = query_table(
table=self.dataset._data , key=slice(_A , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(_A )
written += batch.nbytes
writer.close()
return written
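
The _write loop above is the core of the parquet export: slice the Arrow table into fixed-size batches and hand each slice to a single ParquetWriter, so peak memory stays proportional to the batch size rather than the dataset. A standalone sketch of that loop with a toy pyarrow table (the file name and data are invented):

import pyarrow as pa
import pyarrow.parquet as pq

# Toy table standing in for the dataset's Arrow data.
table = pa.table({"text": [f"example {i}" for i in range(10)], "label": list(range(10))})

batch_size = 4
written = 0
writer = pq.ParquetWriter("out.parquet", schema=table.schema)
for offset in range(0, len(table), batch_size):
    batch = table.slice(offset, batch_size)  # zero-copy view of up to 4 rows
    writer.write_table(batch)                # flushed as its own row group(s)
    written += batch.nbytes
writer.close()
print(f"wrote {written} bytes to out.parquet")
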
| 10 | import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _snake_case ( *__snake_case , __snake_case = None , __snake_case=True , __snake_case=2 ):
from .. import __version__
_UpperCamelCase = take_from
_UpperCamelCase = ()
if not isinstance(args[0] , __snake_case ):
_UpperCamelCase = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(__snake_case ).base_version ) >= version.parse(__snake_case ):
raise ValueError(
f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
f""" version {__version__} is >= {version_name}""" )
_UpperCamelCase = None
if isinstance(__snake_case , __snake_case ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(__snake_case ),)
_UpperCamelCase = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
elif hasattr(__snake_case , __snake_case ):
values += (getattr(__snake_case , __snake_case ),)
_UpperCamelCase = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
elif deprecated_kwargs is None:
_UpperCamelCase = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
if warning is not None:
_UpperCamelCase = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , __snake_case , stacklevel=__snake_case )
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) > 0:
_UpperCamelCase = inspect.getouterframes(inspect.currentframe() )[1]
_UpperCamelCase = call_frame.filename
_UpperCamelCase = call_frame.lineno
_UpperCamelCase = call_frame.function
_UpperCamelCase , _UpperCamelCase = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
if len(__snake_case ) == 0:
return
elif len(__snake_case ) == 1:
return values[0]
return values
| 10 | 1 |
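
The deprecation helper above packs three behaviors into one call: pop the deprecated kwarg, warn with the version it disappears in, and raise once the installed version has already passed that point (a reminder to delete the shim). A stripped-down sketch of the same idea, simplified from the snippet rather than copied from any library:

import warnings

from packaging import version

CURRENT_VERSION = "0.20.0"  # stand-in for the package's __version__


def deprecate(name, removed_in, message, kwargs):
    if version.parse(CURRENT_VERSION) >= version.parse(removed_in):
        # Past the removal version: the deprecation shim itself is stale.
        raise ValueError(f"`{name}` was removed in {removed_in}; delete this shim.")
    if name in kwargs:
        warnings.warn(
            f"`{name}` is deprecated and will be removed in {removed_in}. {message}",
            FutureWarning,
            stacklevel=2,
        )
        return kwargs.pop(name)
    return None


def resize(image, **kwargs):
    size = deprecate("shape", "1.0.0", "Use `size` instead.", kwargs)
    return kwargs.get("size", size)


print(resize(None, shape=(2, 2)))  # warns once, returns (2, 2)
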
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
_lowerCAmelCase = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def _snake_case ( __snake_case=True ):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__lowercase ) )
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = None
UpperCAmelCase = None
def UpperCamelCase_ ( self : Any , _A : Union[str, Any] , _A : str ):
with TemporaryDirectory() as tmp_dir:
_UpperCamelCase = dataset_module_factory(_A , cache_dir=_A )
_UpperCamelCase = import_main_class(dataset_module.module_path , dataset=_A )
_UpperCamelCase = builder_cls(
cache_dir=_A , config_name=_A , hash=dataset_module.hash , )
_UpperCamelCase = '''/'''.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=_A ).replace(os.sep , '''/''' ),
config.DATASET_INFO_FILENAME,
] )
_UpperCamelCase = cached_path(_A , cache_dir=_A )
self.assertTrue(os.path.exists(_A ) )
@pytest.mark.integration
def _snake_case ( __snake_case ):
_UpperCamelCase = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple'''
_UpperCamelCase = dataset_module_factory('''wikipedia''' , cache_dir=__snake_case )
_UpperCamelCase = import_main_class(dataset_module.module_path )
_UpperCamelCase = builder_cls(
cache_dir=__snake_case , config_name='''20220301.frr''' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
_UpperCamelCase = None
builder_instance.download_and_prepare()
_UpperCamelCase = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def _snake_case ( __snake_case ):
_UpperCamelCase = dataset_module_factory('''wikipedia''' , cache_dir=__snake_case )
_UpperCamelCase = import_main_class(dataset_module.module_path , dataset=__snake_case )
_UpperCamelCase = builder_cls(
cache_dir=__snake_case , config_name='''20220301.frr''' , hash=dataset_module.hash , )
_UpperCamelCase = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(__snake_case , __snake_case )
assert "train" in ds
assert isinstance(ds['''train'''] , __snake_case )
assert next(iter(ds['''train'''] ) )
| 10 | import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowerCAmelCase = logging.getLogger(__name__)
def _snake_case ( __snake_case , __snake_case ):
return (preds == labels).mean()
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128, metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed
set_seed(training_args.seed )
try:
_UpperCamelCase = processors[data_args.task_name]()
_UpperCamelCase = processor.get_labels()
_UpperCamelCase = len(__snake_case )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__snake_case , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
_UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCamelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , )
# Get datasets
_UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__snake_case , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__snake_case , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(__snake_case ) -> Dict:
_UpperCamelCase = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(__snake_case , p.label_ids )}
# Data collator
    _UpperCamelCase = DataCollatorWithPadding(__snake_case , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
_UpperCamelCase = Trainer(
model=__snake_case , args=__snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , compute_metrics=__snake_case , data_collator=__snake_case , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_master():
with open(__snake_case , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , __snake_case , __snake_case )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__snake_case )
return results
def _snake_case ( __snake_case ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 10 | 1 |
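
Both scripts in this row score predictions the same way: argmax over the class axis of the logits, then mean agreement with the label ids. A self-contained numpy sketch of that compute_metrics contract (the (predictions, label_ids) pair is the shape the Trainer passes in):

import numpy as np


def simple_accuracy(preds: np.ndarray, labels: np.ndarray) -> float:
    return float((preds == labels).mean())


def compute_metrics(eval_pred) -> dict:
    # eval_pred unpacks to (logits, label_ids); logits have shape
    # (num_examples, num_classes).
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=1)
    return {"acc": simple_accuracy(preds, labels)}


logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
labels = np.array([1, 0, 0])
print(compute_metrics((logits, labels)))  # {'acc': 0.666...}
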
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
_lowerCAmelCase = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
_UpperCamelCase = '''lm_head'''
_UpperCamelCase = getattr(__snake_case , __snake_case )
if weight_type is not None:
_UpperCamelCase = getattr(__snake_case , __snake_case ).shape
else:
_UpperCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_UpperCamelCase = value
elif weight_type == "weight_g":
_UpperCamelCase = value
elif weight_type == "weight_v":
_UpperCamelCase = value
elif weight_type == "bias":
_UpperCamelCase = value
else:
_UpperCamelCase = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _snake_case ( __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = []
_UpperCamelCase = fairseq_model.state_dict()
_UpperCamelCase = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
_UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == '''group''' , )
_UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
_UpperCamelCase = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_UpperCamelCase = True
if "*" in mapped_key:
_UpperCamelCase = name.split(__snake_case )[0].split('''.''' )[-2]
_UpperCamelCase = mapped_key.replace('''*''' , __snake_case )
if "weight_g" in name:
_UpperCamelCase = '''weight_g'''
elif "weight_v" in name:
_UpperCamelCase = '''weight_v'''
elif "bias" in name:
_UpperCamelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_UpperCamelCase = '''weight'''
else:
_UpperCamelCase = None
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(f"""Unused weights: {unused_weights}""" )
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = full_name.split('''conv_layers.''' )[-1]
_UpperCamelCase = name.split('''.''' )
_UpperCamelCase = int(items[0] )
_UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_UpperCamelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_UpperCamelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_UpperCamelCase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_UpperCamelCase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__snake_case )
@torch.no_grad()
def _snake_case ( __snake_case , __snake_case , __snake_case=None , __snake_case=None , __snake_case=True ):
if config_path is not None:
_UpperCamelCase = UniSpeechConfig.from_pretrained(__snake_case )
else:
_UpperCamelCase = UniSpeechConfig()
if is_finetuned:
if dict_path:
_UpperCamelCase = Dictionary.load_from_json(__snake_case )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_UpperCamelCase = target_dict.pad_index
_UpperCamelCase = target_dict.bos_index
_UpperCamelCase = target_dict.eos_index
_UpperCamelCase = len(target_dict.symbols )
_UpperCamelCase = os.path.join(__snake_case , '''vocab.json''' )
if not os.path.isdir(__snake_case ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__snake_case ) )
return
os.makedirs(__snake_case , exist_ok=__snake_case )
_UpperCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_UpperCamelCase = 42
_UpperCamelCase = 43
with open(__snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(__snake_case , __snake_case )
        _UpperCamelCase = Wav2Vec2PhonemeCTCTokenizer(
__snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__snake_case , )
_UpperCamelCase = True if config.feat_extract_norm == '''layer''' else False
        _UpperCamelCase = Wav2Vec2FeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , )
        _UpperCamelCase = Wav2Vec2Processor(feature_extractor=__snake_case , tokenizer=__snake_case )
processor.save_pretrained(__snake_case )
_UpperCamelCase = UniSpeechForCTC(__snake_case )
else:
_UpperCamelCase = UniSpeechForPreTraining(__snake_case )
if is_finetuned:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_UpperCamelCase = model[0].eval()
recursively_load_weights(__snake_case , __snake_case , __snake_case )
hf_unispeech.save_pretrained(__snake_case )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_lowerCAmelCase = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 10 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "trocr"
UpperCAmelCase = ["past_key_values"]
UpperCAmelCase = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
def __init__( self : List[str] , _A : Optional[Any]=5_0265 , _A : Optional[Any]=1024 , _A : Optional[Any]=12 , _A : Any=16 , _A : Any=4096 , _A : Optional[Any]="gelu" , _A : Union[str, Any]=512 , _A : Dict=0.1 , _A : List[str]=0.0 , _A : Optional[Any]=0.0 , _A : Union[str, Any]=2 , _A : Any=0.02 , _A : List[str]=0.0 , _A : List[str]=True , _A : str=False , _A : List[str]=True , _A : Optional[Any]=True , _A : Optional[int]=1 , _A : int=0 , _A : Any=2 , **_A : Optional[int] , ):
_UpperCamelCase = vocab_size
_UpperCamelCase = d_model
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = activation_function
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = init_std
_UpperCamelCase = decoder_layerdrop
_UpperCamelCase = use_cache
_UpperCamelCase = scale_embedding
_UpperCamelCase = use_learned_position_embeddings
_UpperCamelCase = layernorm_embedding
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , decoder_start_token_id=_A , **_A , )
| 10 | 1 |
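
The checkpoint converter in this row renames fairseq weights by walking a dotted key such as "encoder.layer_norm.weight" attribute by attribute before assigning the tensor. A minimal sketch of that getattr/setattr walk on a plain object tree (the tree here is invented; the real code descends torch.nn.Module attributes):

from types import SimpleNamespace

# Invented stand-in for a model's module tree.
model = SimpleNamespace(encoder=SimpleNamespace(layer_norm=SimpleNamespace(weight=None)))


def set_recursively(root, dotted_key, value):
    *path, leaf = dotted_key.split(".")
    pointer = root
    for attribute in path:           # descend: root -> encoder -> layer_norm
        pointer = getattr(pointer, attribute)
    setattr(pointer, leaf, value)    # assign at the final attribute


set_recursively(model, "encoder.layer_norm.weight", [1.0, 2.0])
print(model.encoder.layer_norm.weight)  # [1.0, 2.0]
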
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCAmelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = ["pixel_values"]
def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **_A : List[str] , ):
super().__init__(**_A )
_UpperCamelCase = size if size is not None else {'''shortest_edge''': 224}
_UpperCamelCase = get_size_dict(_A , default_to_square=_A )
_UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_UpperCamelCase = get_size_dict(_A , param_name='''crop_size''' )
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = resample
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase_ ( self : Optional[int] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[Any] , ):
_UpperCamelCase = get_size_dict(_A , default_to_square=_A )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
_UpperCamelCase = int((256 / 224) * size['''shortest_edge'''] )
_UpperCamelCase = get_resize_output_image_size(_A , size=_A , default_to_square=_A )
_UpperCamelCase = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
_A , size=(size_dict['''height'''], size_dict['''width''']) , resample=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : List[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ):
_UpperCamelCase = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def UpperCamelCase_ ( self : Optional[int] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ):
return rescale(_A , scale=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : Any , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ):
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : str , _A : ImageInput , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : PILImageResampling = None , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : Optional[bool] = None , _A : Optional[float] = None , _A : Optional[bool] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[TensorType] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Optional[int] , ):
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(_A , default_to_square=_A )
_UpperCamelCase = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase = get_size_dict(_A , param_name='''crop_size''' )
_UpperCamelCase = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(_A ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(_A , _A , _A ) for image in images]
if do_center_crop:
_UpperCamelCase = [self.center_crop(_A , _A ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(_A , _A ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(_A , _A , _A ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(_A , _A ) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
| 10 | import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : Union[str, Any] , _A : Optional[Any] , _A : Any=13 , _A : Union[str, Any]=7 , _A : List[str]=True , _A : List[str]=True , _A : List[str]=True , _A : List[str]=True , _A : List[Any]=True , _A : Optional[int]=False , _A : Any=False , _A : int=False , _A : Optional[Any]=2 , _A : Any=99 , _A : str=0 , _A : Union[str, Any]=32 , _A : List[Any]=5 , _A : Tuple=4 , _A : List[str]=0.1 , _A : Union[str, Any]=0.1 , _A : int=512 , _A : Union[str, Any]=12 , _A : List[str]=2 , _A : int=0.02 , _A : Optional[Any]=3 , _A : Any=4 , _A : Optional[int]="last" , _A : Any=None , _A : Dict=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_lengths
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = gelu_activation
_UpperCamelCase = sinusoidal_embeddings
_UpperCamelCase = causal
_UpperCamelCase = asm
_UpperCamelCase = n_langs
_UpperCamelCase = vocab_size
_UpperCamelCase = n_special
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = summary_type
_UpperCamelCase = use_proj
_UpperCamelCase = scope
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_input_lengths:
_UpperCamelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , 2 ).float()
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase_ ( self : str ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCamelCase_ ( self : str , _A : Union[str, Any] , _A : Optional[Any] , _A : str , _A : Tuple , _A : List[str] , _A : List[Any] , _A : Any , _A : str , _A : Optional[int] , ):
_UpperCamelCase = FlaubertModel(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , lengths=_A , langs=_A )
_UpperCamelCase = model(_A , langs=_A )
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple , _A : List[Any] , _A : str , _A : Optional[int] , _A : Optional[Any] , _A : List[str] , _A : int , _A : str , _A : List[Any] , _A : Any , ):
_UpperCamelCase = FlaubertWithLMHeadModel(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Tuple , _A : List[str] , _A : List[str] , _A : Optional[Any] , _A : Union[str, Any] , _A : str , _A : List[str] , _A : Tuple , _A : Optional[int] , _A : Dict , ):
_UpperCamelCase = FlaubertForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A , start_positions=_A , end_positions=_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : Tuple , _A : str , _A : Tuple , _A : Tuple , _A : Union[str, Any] , _A : List[str] , _A : int , _A : str , _A : Dict , _A : List[Any] , ):
_UpperCamelCase = FlaubertForQuestionAnswering(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
_UpperCamelCase = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
((_UpperCamelCase) , ) = result_with_labels.to_tuple()
_UpperCamelCase = model(_A , start_positions=_A , end_positions=_A )
((_UpperCamelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCamelCase_ ( self : List[Any] , _A : Union[str, Any] , _A : Tuple , _A : str , _A : int , _A : int , _A : Optional[int] , _A : Optional[int] , _A : int , _A : List[str] , ):
_UpperCamelCase = FlaubertForSequenceClassification(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self : Optional[int] , _A : List[str] , _A : Optional[Any] , _A : str , _A : Union[str, Any] , _A : List[Any] , _A : int , _A : List[Any] , _A : str , _A : List[str] , ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = FlaubertForTokenClassification(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Tuple , _A : Dict , _A : str , _A : Optional[Any] , _A : List[str] , _A : Any , _A : Optional[int] , _A : Optional[Any] , _A : List[Any] , _A : List[str] , ):
_UpperCamelCase = self.num_choices
_UpperCamelCase = FlaubertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = config_and_inputs
_UpperCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase_ ( self : Union[str, Any] , _A : Dict , _A : Dict , _A : Tuple , _A : int , _A : Any ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCamelCase_ ( self : str , _A : Any , _A : List[str] , _A : Optional[int]=False ):
_UpperCamelCase = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = FlaubertModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , emb_dim=37 )
def UpperCamelCase_ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_A )
@slow
def UpperCamelCase_ ( self : str ):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = FlaubertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@slow
@require_torch_gpu
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
_UpperCamelCase = True
_UpperCamelCase = model_class(config=_A )
_UpperCamelCase = self._prepare_for_class(_A , _A )
_UpperCamelCase = torch.jit.trace(
_A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_A , os.path.join(_A , '''traced_model.pt''' ) )
_UpperCamelCase = torch.jit.load(os.path.join(_A , '''traced_model.pt''' ) , map_location=_A )
loaded(inputs_dict['''input_ids'''].to(_A ) , inputs_dict['''attention_mask'''].to(_A ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
_UpperCamelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
_UpperCamelCase = model(_A )[0]
_UpperCamelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _A )
_UpperCamelCase = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _A , atol=1e-4 ) )
| 10 | 1 |
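
The image processor in this row always applies its steps in the same order: resize, center-crop, rescale to [0, 1], then normalize per channel. A plain numpy sketch of the last two steps, which carry the numerics (the mean/std values are the usual ImageNet defaults, assumed rather than read out of the snippet):

import numpy as np

IMAGENET_DEFAULT_MEAN = np.array([0.485, 0.456, 0.406])
IMAGENET_DEFAULT_STD = np.array([0.229, 0.224, 0.225])


def rescale(image: np.ndarray, scale: float = 1 / 255) -> np.ndarray:
    # uint8 pixels -> floats in [0, 1]
    return image.astype(np.float32) * scale


def normalize(image: np.ndarray, mean: np.ndarray, std: np.ndarray) -> np.ndarray:
    # channels-last (H, W, C), so mean/std broadcast over every pixel
    return (image - mean) / std


image = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
pixel_values = normalize(rescale(image), IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
print(pixel_values.shape)  # (224, 224, 3)
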
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowerCAmelCase_ :
@staticmethod
def UpperCamelCase_ ( *_A : Optional[int] , **_A : Optional[Any] ):
pass
@is_pipeline_test
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
UpperCAmelCase = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def UpperCamelCase_ ( self : Optional[Any] , _A : List[Any] , _A : Optional[Any] , _A : List[str] ):
_UpperCamelCase = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
_UpperCamelCase = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : str ):
_UpperCamelCase = object_detector(examples[0] , threshold=0.0 )
_UpperCamelCase = len(_A )
self.assertGreater(_A , 0 )
self.assertEqual(
_A , [
{
'''score''': ANY(_A ),
'''label''': ANY(_A ),
'''box''': {'''xmin''': ANY(_A ), '''ymin''': ANY(_A ), '''xmax''': ANY(_A ), '''ymax''': ANY(_A )},
}
for i in range(_A )
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def UpperCamelCase_ ( self : List[Any] ):
pass
@require_torch
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
_UpperCamelCase = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
] , )
_UpperCamelCase = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
[
{'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
]
] , )
@require_torch
@slow
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = pipeline('''zero-shot-object-detection''' )
_UpperCamelCase = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
] , )
_UpperCamelCase = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def UpperCamelCase_ ( self : List[Any] ):
pass
@require_torch
@slow
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = 0.2
_UpperCamelCase = pipeline('''zero-shot-object-detection''' )
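        # only detections scoring at or above `threshold` are kept, so lowering it
        # from the 0.64 used earlier surfaces more candidate boxes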
_UpperCamelCase = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=_A , )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
] , )
@require_torch
@slow
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = 2
_UpperCamelCase = pipeline('''zero-shot-object-detection''' )
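        # `top_k` truncates the score-sorted detections to the requested count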
_UpperCamelCase = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=_A , )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
] , )
| 10 | from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
def __init__( self : Any , _A : int , _A : int=12 , _A : int=7 , _A : Tuple=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : str=99 , _A : str=32 , _A : int=32 , _A : Optional[Any]=2 , _A : Dict=4 , _A : int=37 , _A : List[Any]=0.1 , _A : str=0.1 , _A : Any=512 , _A : int=0.02 , _A : Optional[Any]=0 , _A : Dict=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = projection_dim
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
_UpperCamelCase = bos_token_id
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
_UpperCamelCase = input_mask.numpy()
_UpperCamelCase , _UpperCamelCase = input_mask.shape
_UpperCamelCase = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
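            # give each row a contiguous prefix of attended tokens (1s) followed by
            # zeros, mimicking right-padded sequences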
for batch_idx, start_index in enumerate(_A ):
_UpperCamelCase = 1
_UpperCamelCase = 0
_UpperCamelCase = self.get_config()
return config, input_ids, tf.convert_to_tensor(_A )
def UpperCamelCase_ ( self : str ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def UpperCamelCase_ ( self : List[str] , _A : Tuple , _A : str , _A : Optional[Any] ):
_UpperCamelCase = TFBlipTextModel(config=_A )
_UpperCamelCase = model(_A , attention_mask=_A , training=_A )
_UpperCamelCase = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = (TFBlipTextModel,) if is_tf_available() else ()
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = BlipTextModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCamelCase_ ( self : Dict ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCamelCase_ ( self : List[Any] ):
pass
def UpperCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : List[str] ):
pass
@slow
def UpperCamelCase_ ( self : Optional[int] ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFBlipTextModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def UpperCamelCase_ ( self : int , _A : Optional[int]=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=_A )
| 10 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "swinv2"
UpperCAmelCase = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : List[str] , _A : Optional[Any]=224 , _A : Dict=4 , _A : Dict=3 , _A : List[str]=96 , _A : Any=[2, 2, 6, 2] , _A : Any=[3, 6, 12, 24] , _A : List[Any]=7 , _A : int=4.0 , _A : Optional[Any]=True , _A : Dict=0.0 , _A : Union[str, Any]=0.0 , _A : Any=0.1 , _A : Dict="gelu" , _A : int=False , _A : Optional[Any]=0.02 , _A : Union[str, Any]=1e-5 , _A : str=32 , **_A : str , ):
super().__init__(**_A )
_UpperCamelCase = image_size
_UpperCamelCase = patch_size
_UpperCamelCase = num_channels
_UpperCamelCase = embed_dim
_UpperCamelCase = depths
_UpperCamelCase = len(_A )
_UpperCamelCase = num_heads
_UpperCamelCase = window_size
_UpperCamelCase = mlp_ratio
_UpperCamelCase = qkv_bias
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = drop_path_rate
_UpperCamelCase = hidden_act
_UpperCamelCase = use_absolute_embeddings
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = initializer_range
_UpperCamelCase = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCamelCase = int(embed_dim * 2 ** (len(_A ) - 1) )
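        # presumably the per-stage pretrained window sizes; an all-zero tuple means none were set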
_UpperCamelCase = (0, 0, 0, 0)
| 10 | from __future__ import annotations
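# Sieve of Eratosthenes up to 1_000_000: every multiple of a prime i (starting
# from i * i) is marked composite, so later primality checks are O(1) lookups.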
_lowerCAmelCase = [True] * 1_000_001
_lowerCAmelCase = 2
while i * i <= 1_000_000:
if seive[i]:
for j in range(i * i, 1_000_001, i):
_lowerCAmelCase = False
i += 1
def _snake_case ( __snake_case ):
return seive[n]
def _snake_case ( __snake_case ):
return any(digit in '''02468''' for digit in str(__snake_case ) )
def _snake_case ( __snake_case = 1000000 ):
_UpperCamelCase = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(__snake_case ) and not contains_an_even_digit(__snake_case ):
_UpperCamelCase = str(__snake_case )
_UpperCamelCase = [int(str_num[j:] + str_num[:j] ) for j in range(len(__snake_case ) )]
if all(is_prime(__snake_case ) for i in list_nums ):
result.append(__snake_case )
return result
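# A circular prime must stay prime under every digit rotation (e.g. 197 -> 971 -> 719,
# all prime); any even digit disqualifies a multi-digit candidate, since some rotation
# would end in an even digit.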
def _snake_case ( ):
return len(find_circular_primes() )
if __name__ == "__main__":
print(f'{len(find_circular_primes()) = }')
| 10 | 1 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
_lowerCAmelCase, _lowerCAmelCase = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
_lowerCAmelCase = rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
_lowerCAmelCase = rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
_lowerCAmelCase = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'pip install -r transformers/examples/{example_dir}/requirements.txt'])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16,
# stream_logs=True)
| 10 | import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = DebertaVaTokenizer
UpperCAmelCase = DebertaVaTokenizerFast
UpperCAmelCase = True
UpperCAmelCase = True
def UpperCamelCase_ ( self : List[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase = DebertaVaTokenizer(_A , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Dict , _A : Union[str, Any] ):
_UpperCamelCase = '''this is a test'''
_UpperCamelCase = '''this is a test'''
return input_text, output_text
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(_A ) , 3_0001 )
def UpperCamelCase_ ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def UpperCamelCase_ ( self : List[str] ):
# fmt: off
_UpperCamelCase = ''' \tHeLLo!how \n Are yoU? '''
_UpperCamelCase = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase_ ( self : Optional[Any] ):
pass
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[Any] ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : int ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Tuple ):
# fmt: off
_UpperCamelCase = ''' \tHeLLo!how \n Are yoU? '''
_UpperCamelCase = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = tokenizer.encode(_A )
_UpperCamelCase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = '''This is a test'''
_UpperCamelCase = [13, 1, 4398, 25, 21, 1289]
_UpperCamelCase = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
_UpperCamelCase = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
_UpperCamelCase = DebertaVaTokenizer(_A , keep_accents=_A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , keep_accents=_A )
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
_UpperCamelCase = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = DebertaVaTokenizer(_A )
_UpperCamelCase = tokenizer.encode('''sequence builders''' )
_UpperCamelCase = tokenizer.encode('''multi-sequence build''' )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_A )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_A , _A )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _A )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _A , )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
# fmt: off
_UpperCamelCase = {'''input_ids''': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 10 | 1 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 10 | import sys
from collections import defaultdict
class lowerCAmelCase_ :
def __init__( self : Optional[int] ):
_UpperCamelCase = []
def UpperCamelCase_ ( self : Any , _A : str ):
return self.node_position[vertex]
def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] , _A : Union[str, Any] ):
_UpperCamelCase = pos
def UpperCamelCase_ ( self : Any , _A : List[str] , _A : int , _A : Optional[Any] , _A : Union[str, Any] ):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
_UpperCamelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
_UpperCamelCase = 2 * start + 1
else:
_UpperCamelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
_UpperCamelCase , _UpperCamelCase = heap[smallest_child], positions[smallest_child]
_UpperCamelCase , _UpperCamelCase = (
heap[start],
positions[start],
)
_UpperCamelCase , _UpperCamelCase = temp, tempa
_UpperCamelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , _A )
self.top_to_bottom(_A , _A , _A , _A )
def UpperCamelCase_ ( self : List[str] , _A : Tuple , _A : Optional[Any] , _A : int , _A : Optional[int] ):
_UpperCamelCase = position[index]
while index != 0:
_UpperCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
_UpperCamelCase = heap[parent]
_UpperCamelCase = position[parent]
self.set_position(position[parent] , _A )
else:
_UpperCamelCase = val
_UpperCamelCase = temp
self.set_position(_A , _A )
break
_UpperCamelCase = parent
else:
_UpperCamelCase = val
_UpperCamelCase = temp
self.set_position(_A , 0 )
def UpperCamelCase_ ( self : int , _A : Tuple , _A : int ):
_UpperCamelCase = len(_A ) // 2 - 1
for i in range(_A , -1 , -1 ):
self.top_to_bottom(_A , _A , len(_A ) , _A )
def UpperCamelCase_ ( self : Any , _A : int , _A : List[str] ):
_UpperCamelCase = positions[0]
_UpperCamelCase = sys.maxsize
self.top_to_bottom(_A , 0 , len(_A ) , _A )
return temp
def _snake_case ( __snake_case ):
_UpperCamelCase = Heap()
_UpperCamelCase = [0] * len(__snake_case )
_UpperCamelCase = [-1] * len(__snake_case ) # Neighboring Tree Vertex of selected vertex
# minimum distance from each explored vertex to its nearest neighbour in the
# partial tree built so far
_UpperCamelCase = [] # Heap of Distance of vertices from their neighboring vertex
_UpperCamelCase = []
for vertex in range(len(__snake_case ) ):
distance_tv.append(sys.maxsize )
positions.append(__snake_case )
heap.node_position.append(__snake_case )
_UpperCamelCase = []
_UpperCamelCase = 1
_UpperCamelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
_UpperCamelCase = 0
_UpperCamelCase = distance
heap.heapify(__snake_case , __snake_case )
for _ in range(1 , len(__snake_case ) ):
_UpperCamelCase = heap.delete_minimum(__snake_case , __snake_case )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
_UpperCamelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(__snake_case )]
):
_UpperCamelCase = distance
heap.bottom_to_top(
__snake_case , heap.get_position(__snake_case ) , __snake_case , __snake_case )
_UpperCamelCase = vertex
return tree_edges
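# Prim's algorithm: repeatedly extract the unvisited vertex closest to the partial
# tree from the min-heap, record the connecting edge, and relax its neighbours.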
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_lowerCAmelCase = int(input("Enter number of edges: ").strip())
_lowerCAmelCase = defaultdict(list)
for _ in range(edges_number):
_lowerCAmelCase = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 10 | 1 |
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , ):
_UpperCamelCase , _UpperCamelCase = coefficient_matrix.shape
_UpperCamelCase , _UpperCamelCase = constant_matrix.shape
if rowsa != colsa:
_UpperCamelCase = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
raise ValueError(__snake_case )
if colsa != 1:
_UpperCamelCase = f"""Constant matrix must be nx1 but received {rowsa}x{colsa}"""
raise ValueError(__snake_case )
if rowsa != rowsa:
_UpperCamelCase = (
'''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
f"""received {rowsa}x{colsa} and {rowsa}x{colsa}"""
)
raise ValueError(__snake_case )
if len(__snake_case ) != rowsa:
_UpperCamelCase = (
'''Number of initial values must be equal to number of rows in coefficient '''
f"""matrix but received {len(__snake_case )} and {rowsa}"""
)
raise ValueError(__snake_case )
if iterations <= 0:
raise ValueError('''Iterations must be at least 1''' )
_UpperCamelCase = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
_UpperCamelCase , _UpperCamelCase = table.shape
strictly_diagonally_dominant(__snake_case )
# Iterates the whole matrix for given number of times
for _ in range(__snake_case ):
_UpperCamelCase = []
for row in range(__snake_case ):
_UpperCamelCase = 0
for col in range(__snake_case ):
if col == row:
_UpperCamelCase = table[row][col]
elif col == cols - 1:
_UpperCamelCase = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
_UpperCamelCase = (temp + val) / denom
new_val.append(__snake_case )
_UpperCamelCase = new_val
return [float(__snake_case ) for i in new_val]
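# Each sweep above applies the Jacobi update x_i <- (b_i - sum_{j != i} a_ij * x_j) / a_ii,
# computing every new component from the previous iterate only.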
def _snake_case ( __snake_case ):
_UpperCamelCase , _UpperCamelCase = table.shape
_UpperCamelCase = True
for i in range(0 , __snake_case ):
_UpperCamelCase = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
return is_diagonally_dominant
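# Strict diagonal dominance (the diagonal entry exceeding the sum of the other
# entries in its row) is the classical sufficient condition for the Jacobi sweep
# above to converge; note this sample compares raw values rather than magnitudes.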
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | import logging
import os
from .state import PartialState
class lowerCAmelCase_ ( logging.LoggerAdapter ):
@staticmethod
def UpperCamelCase_ ( _A : Any ):
_UpperCamelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[Any] , _A : str , *_A : int , **_A : List[Any] ):
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
_UpperCamelCase = kwargs.pop('''main_process_only''' , _A )
_UpperCamelCase = kwargs.pop('''in_order''' , _A )
if self.isEnabledFor(_A ):
if self._should_log(_A ):
_UpperCamelCase , _UpperCamelCase = self.process(_A , _A )
self.logger.log(_A , _A , *_A , **_A )
elif in_order:
_UpperCamelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_UpperCamelCase , _UpperCamelCase = self.process(_A , _A )
self.logger.log(_A , _A , *_A , **_A )
state.wait_for_everyone()
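# `main_process_only=True` logs once from the main process; `in_order=True` instead
# lets every rank log in turn, separated by the `wait_for_everyone()` barrier.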
def _snake_case ( __snake_case , __snake_case = None ):
if log_level is None:
_UpperCamelCase = os.environ.get('''ACCELERATE_LOG_LEVEL''' , __snake_case )
_UpperCamelCase = logging.getLogger(__snake_case )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(__snake_case , {} )
| 10 | 1 |
_lowerCAmelCase = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
_lowerCAmelCase = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
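# Kosaraju's algorithm: a first DFS records vertices in post-order, then a second
# DFS over the reversed graph, taken in reverse post-order, peels off one strongly
# connected component at a time.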
def _snake_case ( __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = True
_UpperCamelCase = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(__snake_case , __snake_case , __snake_case )
order.append(__snake_case )
return order
def _snake_case ( __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = True
_UpperCamelCase = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(__snake_case , __snake_case , __snake_case )
return component
def _snake_case ( __snake_case ):
_UpperCamelCase = len(__snake_case ) * [False]
_UpperCamelCase = {vert: [] for vert in range(len(__snake_case ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(__snake_case )
_UpperCamelCase = []
for i, was_visited in enumerate(__snake_case ):
if not was_visited:
order += topology_sort(__snake_case , __snake_case , __snake_case )
_UpperCamelCase = []
_UpperCamelCase = len(__snake_case ) * [False]
for i in range(len(__snake_case ) ):
_UpperCamelCase = order[len(__snake_case ) - i - 1]
if not visited[vert]:
_UpperCamelCase = find_components(__snake_case , __snake_case , __snake_case )
components_list.append(__snake_case )
return components_list
| 10 | import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = "▁"
_lowerCAmelCase = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = BertGenerationTokenizer
UpperCAmelCase = False
UpperCAmelCase = True
def UpperCamelCase_ ( self : List[str] ):
super().setUp()
_UpperCamelCase = BertGenerationTokenizer(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = '''<s>'''
_UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(_A ) , 1002 )
def UpperCamelCase_ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = BertGenerationTokenizer(_A , keep_accents=_A )
_UpperCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [285, 46, 10, 170, 382] , )
_UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_UpperCamelCase = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def UpperCamelCase_ ( self : Union[str, Any] ):
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''Hello World!'''
_UpperCamelCase = [1_8536, 2260, 101]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
_UpperCamelCase = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def UpperCamelCase_ ( self : Dict ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
_UpperCamelCase = list(self.big_tokenizer.get_vocab().keys() )[:10]
_UpperCamelCase = ''' '''.join(_A )
_UpperCamelCase = self.big_tokenizer.encode_plus(_A , return_tensors='''pt''' , return_token_type_ids=_A )
_UpperCamelCase = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_A )
_UpperCamelCase = BertGenerationConfig()
_UpperCamelCase = BertGenerationEncoder(_A )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_A )
model(**_A )
@slow
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = {'''input_ids''': [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
| 10 | 1 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
_lowerCAmelCase = "3"
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
| 10 | import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCAmelCase_ ( __lowercase, __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = StableUnCLIPPipeline
UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
UpperCAmelCase = False
def UpperCamelCase_ ( self : Optional[int] ):
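        # stable unCLIP chains a text-to-image-embedding prior, an image-embedding
        # noiser (normalizer + noising scheduler), and a conventional text-conditioned
        # UNet/VAE denoiser; dummy versions of each are assembled below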
_UpperCamelCase = 32
_UpperCamelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=_A , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_A , num_layers=1 , )
torch.manual_seed(0 )
_UpperCamelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=_A , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
_UpperCamelCase = StableUnCLIPImageNormalizer(embedding_dim=_A )
_UpperCamelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_A , layers_per_block=1 , upcast_attention=_A , use_linear_projection=_A , )
torch.manual_seed(0 )
_UpperCamelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_A , steps_offset=1 , )
torch.manual_seed(0 )
_UpperCamelCase = AutoencoderKL()
_UpperCamelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Dict=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_A )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_A )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
_UpperCamelCase = pipe('''anime turle''' , generator=_A , output_type='''np''' )
_UpperCamelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
_UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 10 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_lowerCAmelCase = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_lowerCAmelCase = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_lowerCAmelCase = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
_lowerCAmelCase = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
_lowerCAmelCase = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
_lowerCAmelCase = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
_lowerCAmelCase = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
_lowerCAmelCase = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
_lowerCAmelCase = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase = DPRContextEncoderTokenizer
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase = DPRQuestionEncoderTokenizer
_lowerCAmelCase = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
_lowerCAmelCase = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_lowerCAmelCase = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(__lowercase )
class lowerCAmelCase_ :
def __call__( self : Union[str, Any] , _A : List[Any] , _A : Optional[str] = None , _A : Optional[str] = None , _A : Union[bool, str] = False , _A : Union[bool, str] = False , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[bool] = None , **_A : str , ):
if titles is None and texts is None:
return super().__call__(
_A , padding=_A , truncation=_A , max_length=_A , return_tensors=_A , return_attention_mask=_A , **_A , )
elif titles is None or texts is None:
_UpperCamelCase = titles if texts is None else texts
return super().__call__(
_A , _A , padding=_A , truncation=_A , max_length=_A , return_tensors=_A , return_attention_mask=_A , **_A , )
_UpperCamelCase = titles if not isinstance(_A , _A ) else [titles]
_UpperCamelCase = texts if not isinstance(_A , _A ) else [texts]
_UpperCamelCase = len(_A )
_UpperCamelCase = questions if not isinstance(_A , _A ) else [questions] * n_passages
assert len(_A ) == len(
_A ), F"""There should be as many titles than texts but got {len(_A )} titles and {len(_A )} texts."""
_UpperCamelCase = super().__call__(_A , _A , padding=_A , truncation=_A )['''input_ids''']
_UpperCamelCase = super().__call__(_A , add_special_tokens=_A , padding=_A , truncation=_A )['''input_ids''']
_UpperCamelCase = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_A , _A )
]
}
if return_attention_mask is not False:
_UpperCamelCase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_UpperCamelCase = attention_mask
return self.pad(_A , padding=_A , max_length=_A , return_tensors=_A )
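# A minimal usage sketch for the call above. The checkpoint name is an
# assumption for illustration, not taken from this file:
#
#     tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded = tokenizer(
#         questions="What is love?",
#         titles=["Haddaway"],
#         texts=["'What Is Love' is a song recorded by Haddaway."],
#         return_tensors="pt",
#     )
#     # encoded["input_ids"] has shape (n_passages, sequence_length), laid out as
#     # [CLS] <question ids> [SEP] <title ids> [SEP] <text ids>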
def UpperCamelCase_ ( self : Tuple , _A : BatchEncoding , _A : DPRReaderOutput , _A : int = 16 , _A : int = 64 , _A : int = 4 , ):
_UpperCamelCase = reader_input['''input_ids''']
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = reader_output[:3]
_UpperCamelCase = len(_A )
_UpperCamelCase = sorted(range(_A ) , reverse=_A , key=relevance_logits.__getitem__ )
_UpperCamelCase = []
for doc_id in sorted_docs:
_UpperCamelCase = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_UpperCamelCase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_UpperCamelCase = sequence_ids.index(self.pad_token_id )
else:
_UpperCamelCase = len(_A )
_UpperCamelCase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_A , top_spans=_A , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_A , start_index=_A , end_index=_A , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_A ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
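# Sketch of how the method above (`decode_best_spans` per the docstring) is
# typically consumed; assumes a DPR reader model producing start/end/relevance
# logits, and hedges the exact keyword names:
#
#     outputs = model(**encoded)
#     best_spans = tokenizer.decode_best_spans(
#         encoded, outputs, num_spans=3, max_answer_length=30, num_spans_per_passage=2
#     )
#     for span in best_spans:  # DPRSpanPrediction namedtuples, best passage first
#         print(span.relevance_score, span.text)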
def UpperCamelCase_ ( self : Any , _A : List[int] , _A : List[int] , _A : int , _A : int , ):
_UpperCamelCase = []
for start_index, start_score in enumerate(_A ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_UpperCamelCase = sorted(_A , key=lambda x : x[1] , reverse=_A )
_UpperCamelCase = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]"""
_UpperCamelCase = end_index - start_index + 1
assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_A ) == top_spans:
break
return chosen_span_intervals
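# Worked mini-example of the overlap filter above (illustrative numbers only):
# with candidate spans sorted by score [(2, 5), (3, 4), (7, 9)], (2, 5) is kept,
# (3, 4) is skipped because it nests inside (2, 5), and (7, 9) is kept, so
# top_spans=2 yields [(2, 5), (7, 9)].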
@add_end_docstrings(__lowercase )
class lowerCAmelCase_ ( __lowercase, __lowercase ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase = ["input_ids", "attention_mask"]
UpperCAmelCase = DPRReaderTokenizer
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def _snake_case ( __snake_case , __snake_case ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(__snake_case , __snake_case ) ) )
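# Quick worked example of the distance computed above: for a 3-4-5 triangle,
#     math.sqrt(sum(pow(a - b, 2) for a, b in zip([0, 0], [3, 4])))  # -> 5.0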
def _snake_case ( __snake_case , __snake_case ):
if dataset.ndim != value_array.ndim:
_UpperCamelCase = (
'''Wrong input data\'s dimensions... '''
f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(__snake_case )
try:
if dataset.shape[1] != value_array.shape[1]:
_UpperCamelCase = (
'''Wrong input data\'s shape... '''
f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(__snake_case )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('''Wrong shape''' )
if dataset.dtype != value_array.dtype:
_UpperCamelCase = (
'''Input data have different datatypes... '''
f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(__snake_case )
_UpperCamelCase = []
for value in value_array:
_UpperCamelCase = euclidean(__snake_case , dataset[0] )
_UpperCamelCase = dataset[0].tolist()
for dataset_value in dataset[1:]:
_UpperCamelCase = euclidean(__snake_case , __snake_case )
if dist > temp_dist:
_UpperCamelCase = temp_dist
_UpperCamelCase = dataset_value.tolist()
answer.append([vector, dist] )
return answer
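# Illustrative call for the nearest-neighbour search above (shapes follow the
# checks at the top of the function):
#     dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
#     value_array = np.array([[0.9, 1.1]])
#     # returns [[[1.0, 1.0], 0.1414...]] -- the closest vector and its distance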
def _snake_case ( __snake_case , __snake_case ):
return np.dot(__snake_case , __snake_case ) / (norm(__snake_case ) * norm(__snake_case ))
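# Cosine-similarity sanity check: orthogonal vectors score 0.0, parallel ones 1.0:
#     np.dot([1, 0], [0, 1]) / (norm([1, 0]) * norm([0, 1]))  # -> 0.0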
if __name__ == "__main__":
import doctest
doctest.testmod()
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
_lowerCAmelCase = datasets.logging.get_logger(__name__)
_lowerCAmelCase = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
_lowerCAmelCase = "\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
_lowerCAmelCase = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"
_lowerCAmelCase = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
def UpperCamelCase_ ( self : Any ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
def UpperCamelCase_ ( self : List[Any] , _A : int ):
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
'''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
'''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
_UpperCamelCase = '''bleurt-base-128'''
if self.config_name.lower() in CHECKPOINT_URLS:
_UpperCamelCase = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
_UpperCamelCase = self.config_name.upper()
else:
raise KeyError(
F"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" )
# download the model checkpoint specified by self.config_name and set up the scorer
_UpperCamelCase = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
_UpperCamelCase = score.BleurtScorer(os.path.join(_A , _A ) )
def UpperCamelCase_ ( self : Dict , _A : List[str] , _A : Dict ):
_UpperCamelCase = self.scorer.score(references=_A , candidates=_A )
return {"scores": scores}
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = ShapEPipeline
UpperCAmelCase = ["prompt"]
UpperCAmelCase = ["prompt"]
UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
UpperCAmelCase = False
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
return 32
@property
def UpperCamelCase_ ( self : int ):
return 32
@property
def UpperCamelCase_ ( self : List[str] ):
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return 8
@property
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCamelCase_ ( self : List[Any] ):
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCamelCase_ ( self : int ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
_UpperCamelCase = PriorTransformer(**_A )
return model
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
_UpperCamelCase = ShapERenderer(**_A )
return model
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.dummy_prior
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = self.dummy_tokenizer
_UpperCamelCase = self.dummy_renderer
_UpperCamelCase = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , )
_UpperCamelCase = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase_ ( self : Tuple , _A : Tuple , _A : Optional[int]=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = '''cpu'''
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = pipe(**self.get_dummy_inputs(_A ) )
_UpperCamelCase = output.images[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_UpperCamelCase = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self : Any ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = torch_device == '''cpu'''
_UpperCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_A , relax_max_difference=_A , )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = 1
_UpperCamelCase = 2
_UpperCamelCase = self.get_dummy_inputs(_A )
for key in inputs.keys():
if key in self.batch_params:
_UpperCamelCase = batch_size * [inputs[key]]
_UpperCamelCase = pipe(**_A , num_images_per_prompt=_A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
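# Shape rationale for the assertion above: the pipeline repeats each batched
# input num_images_per_prompt times, so the leading dimension of the output is
# batch_size * num_images_per_prompt (here 1 * 2 = 2 generated images).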
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
_UpperCamelCase = ShapEPipeline.from_pretrained('''openai/shap-e''' )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(0 )
_UpperCamelCase = pipe(
'''a shark''' , generator=_A , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_A , _A )