import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a warmup schedule on a given learning rate decay schedule."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Creates an optimizer with a warmup phase followed by a polynomial decay schedule."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay, skipping parameters matched by `exclude_from_weight_decay`."""

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Gradient accumulation utility: call it on gradients to accumulate them, read them via `.gradients`, and call `.reset()` after applying them."""

    def __init__(self):
        """Initializes the accumulator."""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
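# Example usage (a minimal sketch, not part of the original module; the hyperparameters
# below are illustrative assumptions):
#
#     optimizer, lr_schedule = create_optimizer(
#         init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=500, weight_decay_rate=0.01
#     )
#     accumulator = GradientAccumulator()
#     # In the training loop: call accumulator(grads) each step; every N steps apply
#     # accumulator.gradients with optimizer.apply_gradients(...) and then accumulator.reset().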
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
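# Example usage (a minimal sketch, not part of the original file):
#
#     config = SEWConfig()  # default hyperparameters
#     # Overall downsampling of the convolutional feature extractor is the product of
#     # the conv strides: 5 * 2 * 1 * 2 * 1 * 2 * 1 * 2 * 1 * 2 * 1 * 2 * 1 = 320.
#     print(config.inputs_to_logits_ratio)  # 320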
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """
    Pre-process datasets containing lists of tuples (story, 1st continuation, 2nd continuation, label).

    Produces Transformer inputs of shape (n_batch, 2, input_len) where, for each alternative,
    input_ids[i, alt, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
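# Layout illustration (added for clarity, not in the original script): with the three
# special tokens (start, delimiter, classify), each encoded alternative looks like
#     [start] story tokens ... [delimiter] continuation tokens ... [classify]
# and `mc_token_ids` records the index of the final [classify] token, whose hidden
# state feeds the multiple-choice head.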
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading function also adds new tokens and embeddings called `special tokens`.
    # These new embeddings will be fine-tuned on the RocStories dataset.
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
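# Example invocation (a sketch; the dataset paths are placeholders, not from this file):
#
#     python run_openai_gpt.py \
#         --model_name openai-gpt \
#         --do_train --do_eval \
#         --train_dataset "$ROC_STORIES_DIR/cloze_test_val__spring2016.csv" \
#         --eval_dataset "$ROC_STORIES_DIR/cloze_test_test__spring2016.csv" \
#         --output_dir ./models \
#         --train_batch_size 16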
def apply_table(inp, table):
    """Applies a permutation table (1-indexed) to the input bit string."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circularly shifts the bit string one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise xor of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Looks up a 2-bit output in S-box `s`: outer bits select the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)  # p4_table is defined in the __main__ block below
    temp = xor(left, temp)
    return temp + right
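# Quick worked examples (illustrative, not part of the original):
#     apply_table("1010", [2, 4, 3, 1]) == "0011"  # 1-indexed permutation
#     left_shift("1010") == "0101"                 # circular shift by one
#     xor("0101", "0011") == "0110"                # bitwise xor of bit strings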
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]  # swap the halves between rounds
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption (same rounds, subkeys in reverse order)
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}
CONTROL_CODES = {
    "Pregnancy": 168629,
    "Christianity": 7675,
    "Explain": 106423,
    "Fitness": 63440,
    "Saving": 63163,
    "Ask": 27171,
    "Ass": 95985,
    "Joke": 163509,
    "Questions": 45622,
    "Thoughts": 49605,
    "Retail": 52342,
    "Feminism": 164338,
    "Writing": 11992,
    "Atheism": 192263,
    "Netflix": 48616,
    "Computing": 39639,
    "Opinion": 43213,
    "Alone": 44967,
    "Funny": 58917,
    "Gaming": 40358,
    "Human": 4088,
    "India": 1331,
    "Joker": 77138,
    "Diet": 36206,
    "Legal": 11859,
    "Norman": 4939,
    "Tip": 72689,
    "Weight": 52343,
    "Movies": 46273,
    "Running": 23425,
    "Science": 2090,
    "Horror": 37793,
    "Confession": 60572,
    "Finance": 12250,
    "Politics": 16360,
    "Scary": 191985,
    "Support": 12654,
    "Technologies": 32516,
    "Teenage": 66160,
    "Event": 32769,
    "Learned": 67460,
    "Notion": 182770,
    "Wikipedia": 37583,
    "Books": 6665,
    "Extract": 76050,
    "Confessions": 102701,
    "Conspiracy": 75932,
    "Links": 63674,
    "Narcissus": 150425,
    "Relationship": 54766,
    "Relationships": 134796,
    "Reviews": 41671,
    "News": 4256,
    "Translation": 26820,
    "multilingual": 128406,
}
def get_pairs(word):
    """
    Return the set of symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    """
    Construct a CTRL tokenizer. Based on Byte-Pair-Encoding.

    Args:
        vocab_file (`str`): Path to the vocabulary file.
        merges_file (`str`): Path to the merges file.
        unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
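# Example usage (a minimal sketch, not part of the original file):
#
#     from transformers import CTRLTokenizer
#     tokenizer = CTRLTokenizer.from_pretrained("ctrl")
#     tokens = tokenizer.tokenize("Links Hello world")  # control code followed by BPE pieces
#     ids = tokenizer.convert_tokens_to_ids(tokens)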
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : list[str] ) -> str:
__a = ''''''
for word_or_phrase in separated:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise Exception('''join() accepts only strings to be joined''' )
joined += word_or_phrase + separator
return joined.strip(lowerCAmelCase__ )
if __name__ == "__main__":
from doctest import testmod
testmod()
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while next_state is None and neighbors:  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (change / current_temp)  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
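# Note (added for clarity, not in the original): the acceptance rule above is the
# Metropolis criterion. A worse neighbor is accepted with probability
# exp(change / current_temp), so exploration shrinks as the temperature decays.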
if __name__ == "__main__":
    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Creates a mapping function from each choice's string representation to the actual value."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    """Argument helper enabling a concise syntax to create dataclass fields for parsing with `HfArgumentParser`."""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """
    This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
    """

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        """Parse command-line args into instances of the specified dataclass types."""
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        """Alternative helper that, instead of using `argparse`, populates the dataclass types from a dict."""
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
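# Example usage (a minimal sketch, not part of the original file; the dataclass is a
# made-up illustration):
#
#     @dataclasses.dataclass
#     class TrainingArgs:
#         learning_rate: float = 5e-5
#         do_train: bool = False
#
#     parser = HfArgumentParser(TrainingArgs)
#     (training_args,) = parser.parse_args_into_dataclasses(
#         args=["--learning_rate", "1e-4", "--do_train"]
#     )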
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0  # threshold the logits into a binary mask
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
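# Example usage (a minimal sketch, not part of the original file; the image path is a
# placeholder):
#
#     from PIL import Image
#     tool = ImageSegmentationTool()
#     mask = tool(image=Image.open("cat.png"), label="cat")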
from math import isqrt
def is_prime(number: int) -> bool:
    """
    Determines whether the given number is prime.

    >>> is_prime(7)
    True
    >>> is_prime(9)
    False
    """
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """
    Counts the primes below `max_prime` that are differences of consecutive cubes.

    >>> solution(100)
    4
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count
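# Why the candidate sequence works (added explanation, not in the original): the
# candidates enumerate differences of consecutive cubes,
#     (n + 1) ** 3 - n ** 3 = 3 * n ** 2 + 3 * n + 1 = 7, 19, 37, 61, ...
# whose successive gaps are 6 * (n + 1); that is exactly the
# `prime_candidate += 6 * cube_index` update above.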
if __name__ == "__main__":
print(F'{solution() = }')
"""simple docstring"""
import os
_A = {'I': 1, 'V': 5, 'X': 1_0, 'L': 5_0, 'C': 1_0_0, 'D': 5_0_0, 'M': 1_0_0_0}
def UpperCAmelCase ( a_ ):
'''simple docstring'''
lowerCamelCase : int = 0
lowerCamelCase : Dict = 0
while index < len(a_ ) - 1:
lowerCamelCase : Tuple = SYMBOLS[numerals[index]]
lowerCamelCase : Dict = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def generate_roman_numerals(num: int) -> str:
    """
    Generates the minimal string of roman numerals for a given integer.

    >>> generate_roman_numerals(19)
    'XIX'
    >>> generate_roman_numerals(4000)
    'MMMM'
    """
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """
    Returns the total character savings from rewriting the roman numerals in the
    given file in their minimal form.
    """
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings
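# Round-trip example (illustrative, not in the original):
#     parse_roman_numerals("XIX") == 19 and generate_roman_numerals(19) == "XIX"
#     parse_roman_numerals("IIIII") == 5, while generate_roman_numerals(5) == "V",
#     a saving of four characters; `solution` totals such savings over the whole file.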
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_A = logging.get_logger('transformers.models.speecht5')
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
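# Example invocation (a sketch; the file paths are placeholders, not from this file):
#
#     python convert_hifigan.py \
#         --checkpoint_path ./hifigan.ckpt \
#         --stats_path ./stats.npy \
#         --pytorch_dump_folder_path ./speecht5_hifigan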
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class lowercase__ ( BertTokenizer ):
'''simple docstring'''
a : int = VOCAB_FILES_NAMES
a : Any = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a : Optional[int] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Any = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase__ ( BertTokenizer ):
'''simple docstring'''
a : Union[str, Any] = VOCAB_FILES_NAMES
a : str = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a : Any = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Optional[int] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
UpperCAmelCase_ = R'\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    ```\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n    ```\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n            Controls the maximum length to use by one of the truncation/padding parameters.\n\n            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n            is required by one of the truncation/padding parameters. If the model has no specific maximum input\n            length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n            If set, will return tensors instead of list of python integers. Acceptable values are:\n\n            - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n            - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n            - `\'np\'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Returns:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '
@add_start_docstrings(__lowerCamelCase )
class lowercase__ :
'''simple docstring'''
def __call__( self, __magic_name__, __magic_name__ = None, __magic_name__ = None, __magic_name__ = False, __magic_name__ = False, __magic_name__ = None, __magic_name__ = None, __magic_name__ = None, **__magic_name__, ) -> BatchEncoding:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
__magic_name__, padding=__magic_name__, truncation=__magic_name__, max_length=__magic_name__, return_tensors=__magic_name__, return_attention_mask=__magic_name__, **__magic_name__, )
elif titles is None or texts is None:
UpperCamelCase__ : Optional[Any] = titles if texts is None else texts
return super().__call__(
__magic_name__, __magic_name__, padding=__magic_name__, truncation=__magic_name__, max_length=__magic_name__, return_tensors=__magic_name__, return_attention_mask=__magic_name__, **__magic_name__, )
UpperCamelCase__ : Optional[int] = titles if not isinstance(__magic_name__, __magic_name__ ) else [titles]
UpperCamelCase__ : Dict = texts if not isinstance(__magic_name__, __magic_name__ ) else [texts]
UpperCamelCase__ : Optional[Any] = len(__magic_name__ )
UpperCamelCase__ : Dict = questions if not isinstance(__magic_name__, __magic_name__ ) else [questions] * n_passages
if len(__magic_name__ ) != len(__magic_name__ ):
raise ValueError(
f"There should be as many titles than texts but got {len(__magic_name__ )} titles and {len(__magic_name__ )} texts." )
UpperCamelCase__ : Any = super().__call__(__magic_name__, __magic_name__, padding=__magic_name__, truncation=__magic_name__ )['''input_ids''']
UpperCamelCase__ : Optional[int] = super().__call__(__magic_name__, add_special_tokens=__magic_name__, padding=__magic_name__, truncation=__magic_name__ )['''input_ids''']
UpperCamelCase__ : Union[str, Any] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__magic_name__, __magic_name__ )
]
}
if return_attention_mask is not False:
UpperCamelCase__ : Any = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
UpperCamelCase__ : Union[str, Any] = attention_mask
return self.pad(__magic_name__, padding=__magic_name__, max_length=__magic_name__, return_tensors=__magic_name__ )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ = 16, __magic_name__ = 64, __magic_name__ = 4, ) -> List[DPRSpanPrediction]:
"""simple docstring"""
UpperCamelCase__ : int = reader_input['''input_ids''']
        start_logits, end_logits, relevance_logits = reader_output[:3]
UpperCamelCase__ : Tuple = len(__magic_name__ )
UpperCamelCase__ : int = sorted(range(__magic_name__ ), reverse=__magic_name__, key=relevance_logits.__getitem__ )
UpperCamelCase__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
UpperCamelCase__ : Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
UpperCamelCase__ : Optional[Any] = sequence_ids.index(self.sep_token_id, 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
UpperCamelCase__ : Tuple = sequence_ids.index(self.pad_token_id )
else:
UpperCamelCase__ : int = len(__magic_name__ )
UpperCamelCase__ : Union[str, Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=__magic_name__, top_spans=__magic_name__, )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=__magic_name__, start_index=__magic_name__, end_index=__magic_name__, text=self.decode(sequence_ids[start_index : end_index + 1] ), ) )
if len(__magic_name__ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__, ) -> List[DPRSpanPrediction]:
"""simple docstring"""
UpperCamelCase__ : Dict = []
for start_index, start_score in enumerate(__magic_name__ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
UpperCamelCase__ : List[Any] = sorted(__magic_name__, key=lambda __magic_name__ : x[1], reverse=__magic_name__ )
UpperCamelCase__ : int = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]" )
UpperCamelCase__ : str = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"Span is too long: {length} > {max_answer_length}" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(__magic_name__ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(__lowerCamelCase )
class lowercase__ ( __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
a : Tuple = VOCAB_FILES_NAMES
a : Union[str, Any] = READER_PRETRAINED_VOCAB_FILES_MAP
a : str = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Optional[int] = READER_PRETRAINED_INIT_CONFIGURATION
a : List[Any] = ["input_ids", "attention_mask"]
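# Minimal end-to-end sketch for the reader tokenizer defined above. It follows the
# documented DPR reader workflow in `transformers`; the question/passage strings are
# made-up placeholders:
#
#   from transformers import DPRReader, DPRReaderTokenizer
#
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by the artist Haddaway."],
#       return_tensors="pt",
#   )
#   outputs = model(**encoded_inputs)
#   predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)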
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
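# Values in MAPPING may contain a "*" wildcard. During weight loading the wildcard is
# replaced with the layer index parsed out of the fairseq parameter name, e.g. the
# fairseq weight "encoder.layers.3.self_attn.k_proj.weight" ends up at
# "unispeech.encoder.layers.3.attention.k_proj" via the "self_attn.k_proj" entry.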
def lowerCAmelCase_ ( __UpperCAmelCase: Tuple , __UpperCAmelCase: int , __UpperCAmelCase: Optional[Any] , __UpperCAmelCase: Optional[int] , __UpperCAmelCase: str , __UpperCAmelCase: Any ) -> List[str]:
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
UpperCamelCase__ : Tuple = '''lm_head'''
UpperCamelCase__ : Optional[int] = getattr(__UpperCAmelCase , __UpperCAmelCase )
if weight_type is not None:
UpperCamelCase__ : List[str] = getattr(__UpperCAmelCase , __UpperCAmelCase ).shape
else:
UpperCamelCase__ : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
UpperCamelCase__ : List[Any] = value
elif weight_type == "weight_g":
UpperCamelCase__ : List[str] = value
elif weight_type == "weight_v":
UpperCamelCase__ : str = value
elif weight_type == "bias":
UpperCamelCase__ : Tuple = value
else:
UpperCamelCase__ : int = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def lowerCAmelCase_ ( __UpperCAmelCase: Optional[Any] , __UpperCAmelCase: Dict , __UpperCAmelCase: str ) -> List[Any]:
UpperCamelCase__ : Optional[Any] = []
UpperCamelCase__ : str = fairseq_model.state_dict()
UpperCamelCase__ : Optional[int] = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase__ : Dict = False
if "conv_layers" in name:
load_conv_layer(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
UpperCamelCase__ : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase__ : int = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
UpperCamelCase__ : Tuple = True
if "*" in mapped_key:
UpperCamelCase__ : Any = name.split(__UpperCAmelCase )[0].split('''.''' )[-2]
UpperCamelCase__ : Optional[Any] = mapped_key.replace('''*''' , __UpperCAmelCase )
if "weight_g" in name:
UpperCamelCase__ : List[str] = '''weight_g'''
elif "weight_v" in name:
UpperCamelCase__ : Dict = '''weight_v'''
elif "bias" in name:
UpperCamelCase__ : List[Any] = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase__ : Optional[Any] = '''weight'''
else:
UpperCamelCase__ : List[Any] = None
set_recursively(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
continue
if not is_used:
unused_weights.append(__UpperCAmelCase )
logger.warning(f"Unused weights: {unused_weights}" )
def lowerCAmelCase_ ( __UpperCAmelCase: Tuple , __UpperCAmelCase: Any , __UpperCAmelCase: Optional[int] , __UpperCAmelCase: List[Any] , __UpperCAmelCase: Tuple ) -> Optional[int]:
UpperCamelCase__ : List[str] = full_name.split('''conv_layers.''' )[-1]
UpperCamelCase__ : List[str] = name.split('''.''' )
UpperCamelCase__ : str = int(items[0] )
UpperCamelCase__ : Union[str, Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
UpperCamelCase__ : int = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
UpperCamelCase__ : Tuple = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
UpperCamelCase__ : int = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
UpperCamelCase__ : List[str] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(__UpperCAmelCase )
@torch.no_grad()
def lowerCAmelCase_ ( __UpperCAmelCase: Tuple , __UpperCAmelCase: Optional[Any] , __UpperCAmelCase: Dict=None , __UpperCAmelCase: Optional[Any]=None , __UpperCAmelCase: Optional[int]=True ) -> Union[str, Any]:
if config_path is not None:
UpperCamelCase__ : str = UniSpeechConfig.from_pretrained(__UpperCAmelCase )
else:
UpperCamelCase__ : List[Any] = UniSpeechConfig()
if is_finetuned:
if dict_path:
UpperCamelCase__ : str = Dictionary.load_from_json(__UpperCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase__ : Any = target_dict.pad_index
UpperCamelCase__ : str = target_dict.bos_index
UpperCamelCase__ : Any = target_dict.eos_index
UpperCamelCase__ : Tuple = len(target_dict.symbols )
UpperCamelCase__ : List[str] = os.path.join(__UpperCAmelCase , '''vocab.json''' )
if not os.path.isdir(__UpperCAmelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__UpperCAmelCase ) )
return
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
UpperCamelCase__ : Dict = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase__ : Optional[Any] = 42
UpperCamelCase__ : List[str] = 43
with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
UpperCamelCase__ : Dict = WavaVecaPhonemeCTCTokenizer(
__UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__UpperCAmelCase , )
UpperCamelCase__ : List[Any] = True if config.feat_extract_norm == '''layer''' else False
UpperCamelCase__ : str = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , )
UpperCamelCase__ : str = WavaVecaProcessor(feature_extractor=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
UpperCamelCase__ : Optional[int] = UniSpeechForCTC(__UpperCAmelCase )
else:
UpperCamelCase__ : Any = UniSpeechForPreTraining(__UpperCAmelCase )
if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
UpperCamelCase__ : Tuple = model[0].eval()
recursively_load_weights(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
hf_unispeech.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
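# Example invocation (a hypothetical sketch: the script name and all paths are placeholders):
#
#   python convert_unispeech.py \
#       --checkpoint_path ./unispeech_large.pt \
#       --dict_path ./dict.ltr.txt \
#       --config_path ./config.json \
#       --pytorch_dump_folder_path ./unispeech-hf
#
# Pass --not_finetuned to convert a pretraining-only checkpoint instead of a fine-tuned CTC model.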
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
if os.path.exists(SCREAMING_SNAKE_CASE_ ):
if os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , 'config.json' ) ) and os.path.isfile(
os.path.join(SCREAMING_SNAKE_CASE_ , 'config.json' ) ):
os.remove(os.path.join(SCREAMING_SNAKE_CASE_ , 'config.json' ) )
if os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(SCREAMING_SNAKE_CASE_ , 'pytorch_model.bin' ) ):
os.remove(os.path.join(SCREAMING_SNAKE_CASE_ , 'pytorch_model.bin' ) )
else:
os.makedirs(SCREAMING_SNAKE_CASE_ )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str=False ):
__UpperCamelCase =2
if unlogit:
__UpperCamelCase =torch.pow(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase =p * torch.log(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase =0
return -plogp.sum(dim=-1 )
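# Worked example for the entropy helper above: a uniform attention row p = [0.5, 0.5]
# gives -sum(p * log(p)) = -2 * 0.5 * log(0.5) = log(2) ~= 0.6931, the maximum entropy
# over two tokens. With unlogit=True the input is squared before the p * log(p) step.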
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
logger.info('lv, h >\t' + '\t'.join(F'{x + 1}' for x in range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
for row in range(len(SCREAMING_SNAKE_CASE_ ) ):
if tensor.dtype != torch.long:
logger.info(F'layer {row + 1}:\t' + '\t'.join(F'{x:.5f}' for x in tensor[row].cpu().data ) )
else:
logger.info(F'layer {row + 1}:\t' + '\t'.join(F'{x:d}' for x in tensor[row].cpu().data ) )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=False ):
__UpperCamelCase , __UpperCamelCase =model.config.num_hidden_layers, model.config.num_attention_heads
__UpperCamelCase =torch.zeros(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).to(args.device )
__UpperCamelCase =torch.zeros(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).to(args.device )
if head_mask is None:
__UpperCamelCase =torch.ones(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).to(args.device )
head_mask.requires_grad_(requires_grad=SCREAMING_SNAKE_CASE_ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
__UpperCamelCase =None
__UpperCamelCase =0.0
__UpperCamelCase =0.0
for step, inputs in enumerate(tqdm(SCREAMING_SNAKE_CASE_ , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
__UpperCamelCase =tuple(t.to(args.device ) for t in inputs )
((__UpperCamelCase ) , ) =inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
__UpperCamelCase =model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =(
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(SCREAMING_SNAKE_CASE_ ):
__UpperCamelCase =entropy(attn.detach() , SCREAMING_SNAKE_CASE_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(SCREAMING_SNAKE_CASE_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
__UpperCamelCase =2
__UpperCamelCase =torch.pow(torch.pow(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
__UpperCamelCase =(head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(SCREAMING_SNAKE_CASE_ )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(SCREAMING_SNAKE_CASE_ )
logger.info('Head ranked by importance scores' )
__UpperCamelCase =torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
__UpperCamelCase =torch.arange(
head_importance.numel() , device=args.device )
__UpperCamelCase =head_ranks.view_as(SCREAMING_SNAKE_CASE_ )
print_ad_tensor(SCREAMING_SNAKE_CASE_ )
return attn_entropy, head_importance, total_loss
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any ):
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =compute_heads_importance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , compute_entropy=SCREAMING_SNAKE_CASE_ )
    __UpperCamelCase =1 / loss # instead of downstream score, use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , SCREAMING_SNAKE_CASE_ , original_score * args.masking_threshold )
__UpperCamelCase =torch.ones_like(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase =max(1 , int(new_head_mask.numel() * args.masking_amount ) )
__UpperCamelCase =original_score
while current_score >= original_score * args.masking_threshold:
__UpperCamelCase =new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
__UpperCamelCase =float('Inf' )
__UpperCamelCase =head_importance.view(-1 ).sort()[1]
if len(SCREAMING_SNAKE_CASE_ ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
__UpperCamelCase =current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
__UpperCamelCase =new_head_mask.view(-1 )
__UpperCamelCase =0.0
__UpperCamelCase =new_head_mask.view_as(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase =new_head_mask.clone().detach()
print_ad_tensor(SCREAMING_SNAKE_CASE_ )
# Compute metric and head importance again
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =compute_heads_importance(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , compute_entropy=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase =1 / loss
logger.info(
        'Masking: current score: %f, remaining heads %d (%.1f percent)' , SCREAMING_SNAKE_CASE_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
logger.info('Final head mask' )
print_ad_tensor(SCREAMING_SNAKE_CASE_ )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =datetime.now()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =compute_heads_importance(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , compute_entropy=SCREAMING_SNAKE_CASE_ , compute_importance=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase =1 / loss
__UpperCamelCase =datetime.now() - before_time
__UpperCamelCase =sum(p.numel() for p in model.parameters() )
__UpperCamelCase ={
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(SCREAMING_SNAKE_CASE_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__UpperCamelCase =[
v,
]
assert sum(len(SCREAMING_SNAKE_CASE_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase =sum(p.numel() for p in model.parameters() )
__UpperCamelCase =datetime.now()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =compute_heads_importance(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , compute_entropy=SCREAMING_SNAKE_CASE_ , compute_importance=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , actually_pruned=SCREAMING_SNAKE_CASE_ , )
__UpperCamelCase =1 / loss
__UpperCamelCase =datetime.now() - before_time
logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percent)' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , pruned_num_params / original_num_params * 1_00 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    logger.info('Pruning: speed ratio (original timing / new timing): %f percent' , original_time / new_time * 1_00 )
save_model(SCREAMING_SNAKE_CASE_ , args.output_dir )
def _UpperCAmelCase ( ):
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=SCREAMING_SNAKE_CASE_ , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=SCREAMING_SNAKE_CASE_ , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=SCREAMING_SNAKE_CASE_ , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=SCREAMING_SNAKE_CASE_ , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=SCREAMING_SNAKE_CASE_ , help='Amount to heads to masking at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=SCREAMING_SNAKE_CASE_ , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=1_28 , type=SCREAMING_SNAKE_CASE_ , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=SCREAMING_SNAKE_CASE_ , help='Batch size.' )
parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE_ , default=42 )
parser.add_argument('--local_rank' , type=SCREAMING_SNAKE_CASE_ , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=SCREAMING_SNAKE_CASE_ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=SCREAMING_SNAKE_CASE_ , default='' , help='Can be used for distant debugging.' )
    args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
__UpperCamelCase =torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
__UpperCamelCase =0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
__UpperCamelCase =torch.device('cuda' , args.local_rank )
__UpperCamelCase =1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
__UpperCamelCase =GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
__UpperCamelCase =nn.parallel.DistributedDataParallel(
SCREAMING_SNAKE_CASE_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=SCREAMING_SNAKE_CASE_ )
elif args.n_gpu > 1:
__UpperCamelCase =nn.DataParallel(SCREAMING_SNAKE_CASE_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=SCREAMING_SNAKE_CASE_ )
torch.save(SCREAMING_SNAKE_CASE_ , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , SCREAMING_SNAKE_CASE_ )
# Prepare dataset
__UpperCamelCase =np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
__UpperCamelCase =(torch.from_numpy(SCREAMING_SNAKE_CASE_ ),)
__UpperCamelCase =TensorDataset(*SCREAMING_SNAKE_CASE_ )
__UpperCamelCase =RandomSampler(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase =DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
__UpperCamelCase =mask_heads(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
prune_heads(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
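# Example invocation (a hypothetical sketch: the script name and paths are placeholders;
# --data_dir must point at a text file of token ids loadable by np.loadtxt, as main() expects):
#
#   python run_prune_gpt2.py \
#       --model_name_or_path gpt2 \
#       --data_dir ./token_ids.txt \
#       --output_dir ./pruning_output \
#       --batch_size 4 \
#       --try_masking --masking_threshold 0.9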
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self , A_ ) -> Optional[int]:
super().__init__()
__UpperCamelCase =torchvision.models.resnetaaa(pretrained=A_ )
__UpperCamelCase =list(model.children() )[:-2]
__UpperCamelCase =nn.Sequential(*A_ )
__UpperCamelCase =nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def _a ( self , A_ ) -> int:
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
__UpperCamelCase =self.pool(self.model(A_ ) )
__UpperCamelCase =torch.flatten(A_ , start_dim=2 )
__UpperCamelCase =out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class UpperCAmelCase__ ( Dataset ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
__UpperCamelCase =[json.loads(A_ ) for l in open(A_ )]
__UpperCamelCase =os.path.dirname(A_ )
__UpperCamelCase =tokenizer
__UpperCamelCase =labels
__UpperCamelCase =len(A_ )
__UpperCamelCase =max_seq_length
__UpperCamelCase =transforms
def __len__( self ) -> Any:
return len(self.data )
def __getitem__( self , A_ ) -> Union[str, Any]:
__UpperCamelCase =torch.LongTensor(self.tokenizer.encode(self.data[index]['text'] , add_special_tokens=A_ ) )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =sentence[0], sentence[1:-1], sentence[-1]
__UpperCamelCase =sentence[: self.max_seq_length]
__UpperCamelCase =torch.zeros(self.n_classes )
__UpperCamelCase =1
__UpperCamelCase =Image.open(os.path.join(self.data_dir , self.data[index]['img'] ) ).convert('RGB' )
__UpperCamelCase =self.transforms(A_ )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _a ( self ) -> List[str]:
__UpperCamelCase =Counter()
for row in self.data:
label_freqs.update(row['label'] )
return label_freqs
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
__UpperCamelCase =[len(row['sentence'] ) for row in batch]
__UpperCamelCase , __UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ), max(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =torch.zeros(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dtype=torch.long )
__UpperCamelCase =torch.zeros(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ):
__UpperCamelCase =input_row['sentence']
__UpperCamelCase =1
__UpperCamelCase =torch.stack([row['image'] for row in batch] )
__UpperCamelCase =torch.stack([row['label'] for row in batch] )
__UpperCamelCase =torch.stack([row['image_start_token'] for row in batch] )
__UpperCamelCase =torch.stack([row['image_end_token'] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def _UpperCAmelCase ( ):
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def _UpperCAmelCase ( ):
return transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ),
] )
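# Putting the dataset pieces together (a sketch: in the upstream MMBT example these
# helpers are named JsonlDataset, get_mmimdb_labels and get_image_transforms; here they
# appear under mangled placeholder names, and the jsonl path and tokenizer choice are
# illustrative assumptions):
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   labels = get_mmimdb_labels()            # the genre list defined above
#   transforms = get_image_transforms()     # the Resize/CenterCrop/Normalize pipeline above
#   dataset = JsonlDataset("./mmimdb/train.jsonl", tokenizer, transforms, labels, max_seq_length=512)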
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class __A ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ : int =BarthezTokenizer
UpperCamelCase__ : Union[str, Any] =BarthezTokenizerFast
UpperCamelCase__ : List[str] =True
UpperCamelCase__ : int =True
def __lowercase ( self ):
"""simple docstring"""
super().setUp()
__UpperCamelCase : Any =BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowerCamelCase__ )
__UpperCamelCase : str =tokenizer
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] ='<pad>'
__UpperCamelCase : Dict =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(lowerCamelCase__ ) , 101122 )
def __lowercase ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Tuple =['A long paragraph for summarization.', 'Another paragraph for summarization.']
__UpperCamelCase : int =[0, 57, 3018, 70307, 91, 2]
__UpperCamelCase : Union[str, Any] =self.tokenizer(
lowerCamelCase__ , max_length=len(lowerCamelCase__ ) , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , return_tensors='pt' )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__UpperCamelCase : List[str] =batch.input_ids.tolist()[0]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__UpperCamelCase : Union[str, Any] =self.get_tokenizer()
__UpperCamelCase : int =self.get_rust_tokenizer()
__UpperCamelCase : str ='I was born in 92000, and this is falsé.'
__UpperCamelCase : Tuple =tokenizer.tokenize(lowerCamelCase__ )
__UpperCamelCase : List[str] =rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : List[Any] =tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : List[Any] =self.get_rust_tokenizer()
__UpperCamelCase : Tuple =tokenizer.encode(lowerCamelCase__ )
__UpperCamelCase : List[str] =rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Dict ={'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model. So we also use French texts.
__UpperCamelCase : str =[
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=lowerCamelCase__ , )
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __a ( TokenizerTesterMixin , unittest.TestCase ):
__lowercase : str = CpmAntTokenizer
__lowercase : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
super().setUp()
lowercase__: Any = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
lowercase__: List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
@tooslow
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__: Optional[int] = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' )
lowercase__: Optional[Any] = '今天天气真好!'
lowercase__: str = ['今天', '天气', '真', '好', '!']
lowercase__: Optional[Any] = tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase__: List[str] = '今天天气真好!'
lowercase__: List[str] = [tokenizer.bos_token] + tokens
lowercase__: Tuple = [6, 9_802, 14_962, 2_082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
lowercase__: Any = tokenizer.decode(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
"""simple docstring"""
def snake_case ( txt ):
    """Return every variant of ``txt`` with exactly one alphabetic character upper-cased.

    >>> snake_case("ab1c")
    ['Ab1c', 'aB1c', 'ab1C']
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt ) )
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__('''doctest''').testmod()
"""simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig (XLMRobertaConfig ):
__magic_name__ = '''M-CLIP'''
def __init__( self : Any , lowerCAmelCase_ : str=1_024 , lowerCAmelCase_ : str=768 , **lowerCAmelCase_ : Union[str, Any] ) -> List[str]:
UpperCAmelCase_ : Tuple = transformerDimSize
UpperCAmelCase_ : List[str] = imageDimSize
super().__init__(**lowerCAmelCase_ )
class UpperCamelCase_ (PreTrainedModel ):
__magic_name__ = MCLIPConfig
def __init__( self : str , lowerCAmelCase_ : int , *lowerCAmelCase_ : int , **lowerCAmelCase_ : List[Any] ) -> Any:
super().__init__(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = XLMRobertaModel(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] ) -> List[str]:
UpperCAmelCase_ : Any = self.transformer(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
UpperCAmelCase_ : Tuple = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(lowerCAmelCase_ ), embs
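# Rough usage sketch (illustrative only: `model` is assumed to be an instance of the
# wrapper class above, built from a config whose transformerDimensions matches the text
# encoder's hidden size; upstream the method above is named `forward`, so the instance
# is callable as below):
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
#   batch = tokenizer(["une photo d'un chat"], return_tensors="pt", padding=True)
#   text_embs, token_embs = model(batch["input_ids"], batch["attention_mask"])
#   # text_embs: mean-pooled, linearly projected sentence embeddings; token_embs: per-token states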
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
snake_case__ : Tuple = logging.get_logger(__name__)
class snake_case_( FeatureExtractionMixin ):
def __init__( self : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] , **UpperCamelCase_ : Any ):
lowerCAmelCase : Optional[Any] = feature_size
lowerCAmelCase : int = sampling_rate
lowerCAmelCase : Dict = padding_value
lowerCAmelCase : Any = kwargs.pop('''padding_side''' , '''right''' )
lowerCAmelCase : Any = kwargs.pop('''return_attention_mask''' , _UpperCAmelCase )
super().__init__(**_UpperCAmelCase )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] = True , UpperCamelCase_ : Any = None , UpperCamelCase_ : Optional[Any] = False , UpperCamelCase_ : List[str] = None , UpperCamelCase_ : Any = None , UpperCamelCase_ : List[str] = None , ):
if isinstance(_UpperCAmelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
lowerCAmelCase : List[str] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'''
F''' to this method that includes {self.model_input_names[0]}, but you provided'''
F''' {list(processed_features.keys() )}''' )
lowerCAmelCase : str = processed_features[self.model_input_names[0]]
lowerCAmelCase : Dict = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(_UpperCAmelCase ) == 0:
if return_attention_mask:
lowerCAmelCase : Optional[Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
lowerCAmelCase : Tuple = required_input[0]
if isinstance(_UpperCAmelCase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
lowerCAmelCase : Union[str, Any] = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(_UpperCAmelCase ):
lowerCAmelCase : Optional[int] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(_UpperCAmelCase ):
lowerCAmelCase : Dict = 'tf'
elif is_torch_tensor(_UpperCAmelCase ):
lowerCAmelCase : Optional[Any] = 'pt'
elif isinstance(_UpperCAmelCase , (int, float, list, tuple, np.ndarray) ):
lowerCAmelCase : Dict = 'np'
else:
raise ValueError(
F'''type of {first_element} unknown: {type(_UpperCAmelCase )}. '''
'''Should be one of a python, numpy, pytorch or tensorflow object.''' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
lowerCAmelCase : Dict = to_numpy(_UpperCAmelCase )
else:
lowerCAmelCase : List[Any] = [to_numpy(_UpperCAmelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
lowerCAmelCase : Union[str, Any] = self._get_padding_strategies(padding=_UpperCAmelCase , max_length=_UpperCAmelCase )
lowerCAmelCase : Tuple = processed_features[self.model_input_names[0]]
lowerCAmelCase : Tuple = len(_UpperCAmelCase )
if not all(len(_UpperCAmelCase ) == batch_size for v in processed_features.values() ):
raise ValueError('''Some items in the output dictionary have a different batch size than others.''' )
lowerCAmelCase : Union[str, Any] = []
for i in range(_UpperCAmelCase ):
lowerCAmelCase : Any = {k: v[i] for k, v in processed_features.items()}
# truncation
lowerCAmelCase : Optional[Any] = self._truncate(
_UpperCAmelCase , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , truncation=_UpperCAmelCase , )
truncated_inputs.append(_UpperCAmelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
lowerCAmelCase : Any = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
lowerCAmelCase : str = PaddingStrategy.MAX_LENGTH
lowerCAmelCase : Optional[Any] = {}
for i in range(_UpperCAmelCase ):
# padding
lowerCAmelCase : Union[str, Any] = self._pad(
truncated_inputs[i] , max_length=_UpperCAmelCase , padding_strategy=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
lowerCAmelCase : List[Any] = []
if value.dtype is np.dtype(np.floataa ):
lowerCAmelCase : Tuple = value.astype(np.floataa )
batch_outputs[key].append(_UpperCAmelCase )
return BatchFeature(_UpperCAmelCase , tensor_type=_UpperCAmelCase )
def lowerCamelCase__ ( self : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] = None , UpperCamelCase_ : Any = PaddingStrategy.DO_NOT_PAD , UpperCamelCase_ : Dict = None , UpperCamelCase_ : str = None , ):
lowerCAmelCase : Any = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
lowerCAmelCase : List[Any] = len(_UpperCAmelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
lowerCAmelCase : List[str] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
lowerCAmelCase : Tuple = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(_UpperCAmelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
lowerCAmelCase : int = np.ones(len(_UpperCAmelCase ) , dtype=np.intaa )
if needs_to_be_padded:
lowerCAmelCase : Union[str, Any] = max_length - len(_UpperCAmelCase )
if self.padding_side == "right":
if return_attention_mask:
lowerCAmelCase : Tuple = np.pad(
processed_features['''attention_mask'''] , (0, difference) )
lowerCAmelCase : Optional[int] = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
lowerCAmelCase : Optional[Any] = np.pad(
_UpperCAmelCase , _UpperCAmelCase , '''constant''' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
lowerCAmelCase : Union[str, Any] = np.pad(
processed_features['''attention_mask'''] , (difference, 0) )
lowerCAmelCase : int = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
lowerCAmelCase : List[Any] = np.pad(
_UpperCAmelCase , _UpperCAmelCase , '''constant''' , constant_values=self.padding_value )
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return processed_features
def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str] = None , UpperCamelCase_ : Union[str, Any] = None , UpperCamelCase_ : List[Any] = None , ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' )
lowerCAmelCase : int = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
lowerCAmelCase : Optional[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
lowerCAmelCase : Dict = len(_UpperCAmelCase ) > max_length
if needs_to_be_truncated:
lowerCAmelCase : Optional[Any] = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
lowerCAmelCase : Any = processed_features['attention_mask'][:max_length]
return processed_features
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : Any=None ):
if padding is not False:
if padding is True:
lowerCAmelCase : List[Any] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowerCAmelCase : List[Any] = PaddingStrategy(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowerCAmelCase : int = padding
else:
lowerCAmelCase : Any = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'''
''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' )
return padding_strategy
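# Editor's sketch (hedged, standalone -- values are hypothetical): the two core
# operations in the padding helpers above are rounding ``max_length`` up to a
# multiple of ``pad_to_multiple_of`` and padding a 1-D feature array with np.pad.
import numpy as np

max_length, pad_to_multiple_of = 10, 8
if max_length % pad_to_multiple_of != 0:
    max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
assert max_length == 16  # 10 rounded up to the next multiple of 8
features = np.array([0.1, 0.2, 0.3])
difference = max_length - len(features)
padded = np.pad(features, (0, difference), "constant", constant_values=0.0)  # right padding
assert padded.shape == (16,)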
| 60 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class SCREAMING_SNAKE_CASE (datasets.BuilderConfig ):
lowerCAmelCase = None
class SCREAMING_SNAKE_CASE (datasets.ArrowBasedBuilder ):
lowerCAmelCase = PandasConfig
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}')
__A : Dict = dl_manager.download_and_extract(self.config.data_files)
if isinstance(_UpperCAmelCase , (str, list, tuple)):
__A : Union[str, Any] = data_files
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : Optional[int] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__A : Optional[Any] = [dl_manager.iter_files(_UpperCAmelCase) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})]
__A : Tuple = []
for split_name, files in data_files.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__A : Optional[Any] = [dl_manager.iter_files(_UpperCAmelCase) for file in files]
splits.append(datasets.SplitGenerator(name=_UpperCAmelCase , gen_kwargs={'files': files}))
return splits
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__A : List[str] = table_cast(_UpperCAmelCase , self.config.features.arrow_schema)
return pa_table
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
for i, file in enumerate(itertools.chain.from_iterable(_UpperCAmelCase)):
with open(_UpperCAmelCase , 'rb') as f:
__A : Optional[int] = pa.Table.from_pandas(pd.read_pickle(_UpperCAmelCase))
yield i, self._cast_table(_UpperCAmelCase) | 190 | 0 |
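# Editor's sketch (hedged, standalone): the generator method above turns each pickled
# pandas DataFrame into an Arrow table via pa.Table.from_pandas before casting it to
# the configured schema. A minimal illustration with an in-memory frame (column names
# are hypothetical):
import pandas as pd
import pyarrow as pa

df = pd.DataFrame({"text": ["a", "b"], "label": [0, 1]})
table = pa.Table.from_pandas(df)
assert table.num_rows == 2 and "label" in table.column_names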
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
_A : Union[str, Any] =logging.get_logger(__name__)
_A : str ='''Hello world! cécé herlolip'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
lowerCamelCase__ : str = FairseqRobertaModel.from_pretrained(_lowerCamelCase )
roberta.eval() # disable dropout
lowerCamelCase__ : str = roberta.model.encoder.sentence_encoder
lowerCamelCase__ : List[Any] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
lowerCamelCase__ : Any = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("""Our RoBERTa config:""" , _lowerCamelCase )
lowerCamelCase__ : Optional[int] = XLMRobertaXLForSequenceClassification(_lowerCamelCase ) if classification_head else XLMRobertaXLForMaskedLM(_lowerCamelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase__ : Union[str, Any] = roberta_sent_encoder.embed_tokens.weight
lowerCamelCase__ : Optional[int] = roberta_sent_encoder.embed_positions.weight
lowerCamelCase__ : str = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
lowerCamelCase__ : Any = roberta_sent_encoder.layer_norm.weight
lowerCamelCase__ : Union[str, Any] = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCamelCase__ : BertLayer = model.roberta.encoder.layer[i]
lowerCamelCase__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
lowerCamelCase__ : RobertaAttention = layer.attention
lowerCamelCase__ : str = roberta_layer.self_attn_layer_norm.weight
lowerCamelCase__ : Tuple = roberta_layer.self_attn_layer_norm.bias
# self attention
lowerCamelCase__ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
lowerCamelCase__ : Optional[Any] = roberta_layer.self_attn.q_proj.weight
lowerCamelCase__ : str = roberta_layer.self_attn.q_proj.bias
lowerCamelCase__ : Union[str, Any] = roberta_layer.self_attn.k_proj.weight
lowerCamelCase__ : Optional[int] = roberta_layer.self_attn.k_proj.bias
lowerCamelCase__ : Optional[Any] = roberta_layer.self_attn.v_proj.weight
lowerCamelCase__ : Dict = roberta_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase__ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
lowerCamelCase__ : Dict = roberta_layer.self_attn.out_proj.weight
lowerCamelCase__ : int = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
lowerCamelCase__ : Union[str, Any] = roberta_layer.final_layer_norm.weight
lowerCamelCase__ : Any = roberta_layer.final_layer_norm.bias
# intermediate
lowerCamelCase__ : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
lowerCamelCase__ : str = roberta_layer.fca.weight
lowerCamelCase__ : Optional[Any] = roberta_layer.fca.bias
# output
lowerCamelCase__ : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
lowerCamelCase__ : int = roberta_layer.fca.weight
lowerCamelCase__ : int = roberta_layer.fca.bias
# end of layer
if classification_head:
lowerCamelCase__ : Optional[int] = roberta.model.classification_heads["mnli"].dense.weight
lowerCamelCase__ : Any = roberta.model.classification_heads["mnli"].dense.bias
lowerCamelCase__ : Optional[int] = roberta.model.classification_heads["mnli"].out_proj.weight
lowerCamelCase__ : Union[str, Any] = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
lowerCamelCase__ : str = roberta.model.encoder.lm_head.dense.weight
lowerCamelCase__ : Union[str, Any] = roberta.model.encoder.lm_head.dense.bias
lowerCamelCase__ : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.weight
lowerCamelCase__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.bias
lowerCamelCase__ : List[str] = roberta.model.encoder.lm_head.weight
lowerCamelCase__ : Tuple = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase__ : torch.Tensor = roberta.encode(_lowerCamelCase ).unsqueeze(0 ) # batch of size 1
lowerCamelCase__ : List[str] = model(_lowerCamelCase )[0]
if classification_head:
lowerCamelCase__ : str = roberta.model.classification_heads["mnli"](roberta.extract_features(_lowerCamelCase ) )
else:
lowerCamelCase__ : int = roberta.model(_lowerCamelCase )[0]
print(our_output.shape , their_output.shape )
lowerCamelCase__ : List[Any] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
lowerCamelCase__ : Optional[int] = torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
pathlib.Path(_lowerCamelCase ).mkdir(parents=_lowerCamelCase , exist_ok=_lowerCamelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_A : List[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
_A : List[str] =parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
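# Editor's sketch (hedged, hypothetical layers -- not part of the conversion script
# above): the recurring pattern in the script is shape-check, copy the tensor, then
# verify the port with a max absolute difference.
import torch
from torch import nn

src = nn.Linear(4, 4)
dst = nn.Linear(4, 4)
assert dst.weight.shape == src.weight.shape
dst.weight = src.weight  # copy one parameter, as done per layer above
max_abs_diff = torch.max(torch.abs(dst.weight - src.weight)).item()
assert max_abs_diff == 0.0  # mirrors the allclose check on model outputs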
| 370 |
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
_A : List[Any] =True
except ImportError:
_A : int =False
_A : Union[str, Any] =logging.get_logger(__name__) # pylint: disable=invalid-name
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Tuple:
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class _lowercase ( _lowercase ):
@staticmethod
def lowerCamelCase_ ( UpperCamelCase__: ArgumentParser ):
lowerCamelCase__ : List[str] = parser.add_parser("""add-new-model""" )
add_new_model_parser.add_argument("""--testing""" , action="""store_true""" , help="""If in testing mode.""" )
add_new_model_parser.add_argument("""--testing_file""" , type=UpperCamelCase__ , help="""Configuration file on which to run.""" )
add_new_model_parser.add_argument(
"""--path""" , type=UpperCamelCase__ , help="""Path to cookiecutter. Should only be used for testing purposes.""" )
add_new_model_parser.set_defaults(func=UpperCamelCase__ )
def __init__( self: Optional[int] , UpperCamelCase__: bool , UpperCamelCase__: str , UpperCamelCase__: str=None , *UpperCamelCase__: Optional[int] ):
lowerCamelCase__ : List[Any] = testing
lowerCamelCase__ : Tuple = testing_file
lowerCamelCase__ : int = path
def lowerCamelCase_ ( self: int ):
warnings.warn(
"""The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. """
"""It is not actively maintained anymore, so might give a result that won't pass all tests and quality """
"""checks, you should use `transformers-cli add-new-model-like` instead.""" )
if not _has_cookiecutter:
raise ImportError(
"""Model creation dependencies are required to use the `add_new_model` command. Install them by running """
"""the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n""" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
lowerCamelCase__ : List[Any] = [directory for directory in os.listdir() if """cookiecutter-template-""" == directory[:22]]
if len(UpperCamelCase__ ) > 0:
raise ValueError(
"""Several directories starting with `cookiecutter-template-` in current working directory. """
"""Please clean your directory by removing all folders starting with `cookiecutter-template-` or """
"""change your working directory.""" )
lowerCamelCase__ : int = (
Path(UpperCamelCase__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
lowerCamelCase__ : int = path_to_transformer_root / """templates""" / """adding_a_new_model"""
# Execute cookiecutter
if not self._testing:
cookiecutter(str(UpperCamelCase__ ) )
else:
with open(self._testing_file , """r""" ) as configuration_file:
lowerCamelCase__ : List[str] = json.load(UpperCamelCase__ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=UpperCamelCase__ , extra_context=UpperCamelCase__ , )
lowerCamelCase__ : Optional[Any] = [directory for directory in os.listdir() if """cookiecutter-template-""" in directory[:22]][0]
# Retrieve configuration
with open(directory + """/configuration.json""" , """r""" ) as configuration_file:
lowerCamelCase__ : int = json.load(UpperCamelCase__ )
lowerCamelCase__ : Tuple = configuration["""lowercase_modelname"""]
lowerCamelCase__ : int = configuration["""generate_tensorflow_pytorch_and_flax"""]
os.remove(F'''{directory}/configuration.json''' )
lowerCamelCase__ : Union[str, Any] = """PyTorch""" in generate_tensorflow_pytorch_and_flax
lowerCamelCase__ : Union[str, Any] = """TensorFlow""" in generate_tensorflow_pytorch_and_flax
lowerCamelCase__ : Tuple = """Flax""" in generate_tensorflow_pytorch_and_flax
lowerCamelCase__ : List[str] = F'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
os.makedirs(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=UpperCamelCase__ )
# Tests require submodules as they have parent imports
with open(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , """w""" ):
pass
shutil.move(
F'''{directory}/__init__.py''' , F'''{model_dir}/__init__.py''' , )
shutil.move(
F'''{directory}/configuration_{lowercase_model_name}.py''' , F'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
def remove_copy_lines(UpperCamelCase__: Optional[int] ):
with open(UpperCamelCase__ , """r""" ) as f:
lowerCamelCase__ : Union[str, Any] = f.readlines()
with open(UpperCamelCase__ , """w""" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(UpperCamelCase__ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_tf_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_flax_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/{lowercase_model_name}.md''' , F'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
F'''{directory}/tokenization_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: List[str] ):
# Create temp file
lowerCamelCase__ , lowerCamelCase__ : Any = mkstemp()
lowerCamelCase__ : Tuple = False
with fdopen(UpperCamelCase__ , """w""" ) as new_file:
with open(UpperCamelCase__ ) as old_file:
for line in old_file:
new_file.write(UpperCamelCase__ )
if line_to_copy_below in line:
lowerCamelCase__ : int = True
for line_to_copy in lines_to_copy:
new_file.write(UpperCamelCase__ )
if not line_found:
raise ValueError(F'''Line {line_to_copy_below} was not found in file.''' )
# Copy the file permissions from the old file to the new file
copymode(UpperCamelCase__ , UpperCamelCase__ )
# Remove original file
remove(UpperCamelCase__ )
# Move new file
move(UpperCamelCase__ , UpperCamelCase__ )
def skip_units(UpperCamelCase__: Optional[int] ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(UpperCamelCase__: List[str] ):
with open(UpperCamelCase__ ) as datafile:
lowerCamelCase__ : int = []
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : int = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
lowerCamelCase__ : List[str] = line.split("""\"""" )[1]
lowerCamelCase__ : List[str] = skip_units(UpperCamelCase__ )
elif "# Below: " in line and "##" not in line:
lowerCamelCase__ : List[Any] = line.split("""\"""" )[1]
lowerCamelCase__ : Union[str, Any] = skip_units(UpperCamelCase__ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = []
elif "# Replace with" in line and "##" not in line:
lowerCamelCase__ : str = []
elif "##" not in line:
lines_to_copy.append(UpperCamelCase__ )
remove(UpperCamelCase__ )
replace_in_files(F'''{directory}/to_replace_{lowercase_model_name}.py''' )
os.rmdir(UpperCamelCase__ )
| 129 | 0 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_UpperCAmelCase : List[str] ={
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowerCAmelCase ( lowerCAmelCase_ )-> Any:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[Any]:
if args.student_type == "roberta":
lowerCAmelCase_ : Tuple = False
elif args.student_type == "gpt2":
lowerCAmelCase_ : List[str] = False
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> List[str]:
if args.student_type == "roberta":
lowerCAmelCase_ : List[str] = False
def lowerCAmelCase ( )-> Tuple:
lowerCAmelCase_ : Dict = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=lowerCAmelCase_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=lowerCAmelCase_ , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=lowerCAmelCase_ , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=lowerCAmelCase_ , help='''Temperature for the softmax.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=lowerCAmelCase_ , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=lowerCAmelCase_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=lowerCAmelCase_ , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=lowerCAmelCase_ , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=lowerCAmelCase_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=lowerCAmelCase_ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=lowerCAmelCase_ , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=lowerCAmelCase_ , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=lowerCAmelCase_ , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=lowerCAmelCase_ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=lowerCAmelCase_ , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=lowerCAmelCase_ , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=lowerCAmelCase_ , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=lowerCAmelCase_ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=lowerCAmelCase_ , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=lowerCAmelCase_ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5e-4 , type=lowerCAmelCase_ , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1e-6 , type=lowerCAmelCase_ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=lowerCAmelCase_ , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=lowerCAmelCase_ , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=lowerCAmelCase_ , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=lowerCAmelCase_ , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=lowerCAmelCase_ , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=lowerCAmelCase_ , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=lowerCAmelCase_ , default=4_000 , help='''Checkpoint interval.''' )
lowerCAmelCase_ : Dict = parser.parse_args()
sanity_checks(lowerCAmelCase_ )
# ARGS #
init_gpu_params(lowerCAmelCase_ )
set_seed(lowerCAmelCase_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(lowerCAmelCase_ ) , lowerCAmelCase_ , indent=4 )
git_log(args.dump_path )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Dict = MODEL_CLASSES[args.student_type]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
lowerCAmelCase_ : Any = teacher_tokenizer_class.from_pretrained(args.teacher_name )
lowerCAmelCase_ : Dict = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
lowerCAmelCase_ : Tuple = tokenizer.all_special_tokens.index(lowerCAmelCase_ )
lowerCAmelCase_ : Tuple = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
lowerCAmelCase_ : List[Any] = special_tok_ids
lowerCAmelCase_ : Tuple = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file , '''rb''' ) as fp:
lowerCAmelCase_ : Optional[Any] = pickle.load(lowerCAmelCase_ )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , '''rb''' ) as fp:
lowerCAmelCase_ : List[str] = pickle.load(lowerCAmelCase_ )
lowerCAmelCase_ : int = np.maximum(lowerCAmelCase_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
lowerCAmelCase_ : Any = 0.0 # do not predict special tokens
lowerCAmelCase_ : List[Any] = torch.from_numpy(lowerCAmelCase_ )
else:
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : Union[str, Any] = LmSeqsDataset(params=lowerCAmelCase_ , data=lowerCAmelCase_ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
lowerCAmelCase_ : List[Any] = student_config_class.from_pretrained(args.student_config )
lowerCAmelCase_ : str = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
lowerCAmelCase_ : Dict = student_model_class.from_pretrained(args.student_pretrained_weights , config=lowerCAmelCase_ )
else:
lowerCAmelCase_ : List[Any] = student_model_class(lowerCAmelCase_ )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info('''Student loaded.''' )
# TEACHER #
lowerCAmelCase_ : List[str] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=lowerCAmelCase_ )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(lowerCAmelCase_ , lowerCAmelCase_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(lowerCAmelCase_ , lowerCAmelCase_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
lowerCAmelCase_ : int = Distiller(
params=lowerCAmelCase_ , dataset=lowerCAmelCase_ , token_probs=lowerCAmelCase_ , student=lowerCAmelCase_ , teacher=lowerCAmelCase_ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main() | 262 |
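# Editor's sketch (hedged): a generic temperature-scaled distillation loss of the kind
# controlled by --alpha_ce and --temperature above. This is the textbook formulation,
# not the Distiller's exact implementation.
import torch
import torch.nn.functional as F

def soft_ce(student_logits, teacher_logits, temperature=2.0):
    # KL divergence between temperature-softened distributions, scaled by T**2
    return F.kl_div(
        F.log_softmax(student_logits / temperature, dim=-1),
        F.softmax(teacher_logits / temperature, dim=-1),
        reduction="batchmean",
    ) * temperature**2

loss = soft_ce(torch.randn(8, 100), torch.randn(8, 100))
assert loss.ndim == 0  # scalar loss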
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
_UpperCAmelCase : Dict ={
"""susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
"""susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = """ernie_m"""
SCREAMING_SNAKE_CASE__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , __lowercase = 2_5_0_0_0_2 , __lowercase = 7_6_8 , __lowercase = 1_2 , __lowercase = 1_2 , __lowercase = 3_0_7_2 , __lowercase = "gelu" , __lowercase = 0.1 , __lowercase = 0.1 , __lowercase = 5_1_4 , __lowercase = 0.02 , __lowercase = 1 , __lowercase = 1e-05 , __lowercase=None , __lowercase=False , __lowercase=0.0 , **__lowercase , ) -> Tuple:
super().__init__(pad_token_id=__lowercase , **__lowercase )
lowerCAmelCase_ : Tuple = vocab_size
lowerCAmelCase_ : Dict = hidden_size
lowerCAmelCase_ : Tuple = num_hidden_layers
lowerCAmelCase_ : int = num_attention_heads
lowerCAmelCase_ : Dict = intermediate_size
lowerCAmelCase_ : int = hidden_act
lowerCAmelCase_ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase_ : Any = attention_probs_dropout_prob
lowerCAmelCase_ : Union[str, Any] = max_position_embeddings
lowerCAmelCase_ : str = initializer_range
lowerCAmelCase_ : List[str] = layer_norm_eps
lowerCAmelCase_ : List[Any] = classifier_dropout
lowerCAmelCase_ : Any = is_decoder
lowerCAmelCase_ : List[Any] = act_dropout | 262 | 1 |
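# Editor's sketch (hedged, standalone toy class -- not the config above): how
# ``attribute_map`` works in PretrainedConfig subclasses, letting a legacy name such
# as ``num_classes`` resolve to the canonical ``num_labels`` field.
class TinyConfig:
    attribute_map = {"num_classes": "num_labels"}

    def __init__(self, num_labels=2):
        self.num_labels = num_labels

    def __getattr__(self, name):  # only called when normal lookup fails
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

cfg = TinyConfig(num_labels=3)
assert cfg.num_classes == 3  # remapped via attribute_map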
import qiskit
def lowerCamelCase_ ( _a : int , _a : int ):
'''simple docstring'''
UpperCAmelCase_ : Any = qiskit.Aer.get_backend("""aer_simulator""" )
# Create a Quantum Circuit acting on the q register
UpperCAmelCase_ : Tuple = qiskit.QuantumCircuit(_a , _a )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
UpperCAmelCase_ : Tuple = qiskit.execute(_a , _a , shots=1000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_a )
if __name__ == "__main__":
print(F"Total count for various states are: {single_qubit_measure(1, 1)}")
| 355 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class _snake_case ( __snake_case , __snake_case ):
'''simple docstring'''
A__ : Optional[Any] = "dinat"
A__ : Any = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self: Any ,lowerCamelCase_: Any=4 ,lowerCamelCase_: Union[str, Any]=3 ,lowerCamelCase_: Union[str, Any]=64 ,lowerCamelCase_: Optional[int]=[3, 4, 6, 5] ,lowerCamelCase_: int=[2, 4, 8, 16] ,lowerCamelCase_: Optional[int]=7 ,lowerCamelCase_: Dict=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] ,lowerCamelCase_: Tuple=3.0 ,lowerCamelCase_: Any=True ,lowerCamelCase_: int=0.0 ,lowerCamelCase_: Optional[Any]=0.0 ,lowerCamelCase_: Optional[int]=0.1 ,lowerCamelCase_: Optional[int]="gelu" ,lowerCamelCase_: Optional[Any]=0.0_2 ,lowerCamelCase_: List[Any]=1e-5 ,lowerCamelCase_: int=0.0 ,lowerCamelCase_: int=None ,lowerCamelCase_: str=None ,**lowerCamelCase_: Dict ,) -> Union[str, Any]:
super().__init__(**lowerCamelCase_ )
UpperCAmelCase_ : List[str] = patch_size
UpperCAmelCase_ : Tuple = num_channels
UpperCAmelCase_ : Union[str, Any] = embed_dim
UpperCAmelCase_ : int = depths
UpperCAmelCase_ : List[Any] = len(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = num_heads
UpperCAmelCase_ : Tuple = kernel_size
UpperCAmelCase_ : int = dilations
UpperCAmelCase_ : Optional[Any] = mlp_ratio
UpperCAmelCase_ : Optional[Any] = qkv_bias
UpperCAmelCase_ : List[Any] = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = drop_path_rate
UpperCAmelCase_ : List[str] = hidden_act
UpperCAmelCase_ : Any = layer_norm_eps
UpperCAmelCase_ : List[str] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase_ : List[Any] = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
UpperCAmelCase_ : Optional[int] = layer_scale_init_value
UpperCAmelCase_ : List[Any] = ["""stem"""] + [F'''stage{idx}''' for idx in range(1 ,len(lowerCamelCase_ ) + 1 )]
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = get_aligned_output_features_output_indices(
out_features=lowerCamelCase_ ,out_indices=lowerCamelCase_ ,stage_names=self.stage_names )
| 59 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
snake_case_ = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
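# Editor's sketch (hedged, simplified analogue of _LazyModule -- not its actual
# implementation): PEP 562 module-level __getattr__ defers the heavy import until an
# attribute is first touched. Meant for a package __init__.py; the mapping below is
# illustrative and assumes the submodule exists.
import importlib

_LAZY = {"GraphormerModel": ".modeling_graphormer"}

def __getattr__(name):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")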
| 24 |
"""simple docstring"""
def _lowerCAmelCase ( UpperCamelCase_ = 100 ):
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 100 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class lowercase__ :
def __init__( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple=13 , snake_case__ : str=7 , snake_case__ : Union[str, Any]=6 , snake_case__ : str=17 , snake_case__ : Any=23 , snake_case__ : int=11 , snake_case__ : Tuple=True , ):
lowerCamelCase_ : str =parent
lowerCamelCase_ : Union[str, Any] =batch_size
lowerCamelCase_ : List[Any] =seq_length
lowerCamelCase_ : Union[str, Any] =act_dim
lowerCamelCase_ : Optional[Any] =state_dim
lowerCamelCase_ : Optional[Any] =hidden_size
lowerCamelCase_ : Tuple =max_length
lowerCamelCase_ : List[Any] =is_training
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ : Optional[Any] =floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowerCamelCase_ : Optional[Any] =floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowerCamelCase_ : List[Any] =floats_tensor((self.batch_size, self.seq_length, 1) )
lowerCamelCase_ : Optional[Any] =floats_tensor((self.batch_size, self.seq_length, 1) )
lowerCamelCase_ : List[Any] =ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
lowerCamelCase_ : Optional[int] =random_attention_mask((self.batch_size, self.seq_length) )
lowerCamelCase_ : List[str] =self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def UpperCAmelCase__ ( self : Any ):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : int , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : List[str] , ):
lowerCamelCase_ : Tuple =DecisionTransformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCamelCase_ : str =model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : List[str] =self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
lowerCamelCase_ : Optional[int] ={
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class lowercase__ ( snake_case__, snake_case__, snake_case__, unittest.TestCase ):
_UpperCAmelCase :Optional[Any] = (DecisionTransformerModel,) if is_torch_available() else ()
_UpperCAmelCase :int = ()
_UpperCAmelCase :int = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
_UpperCAmelCase :Union[str, Any] = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
_UpperCAmelCase :Optional[Any] = False
_UpperCAmelCase :Tuple = False
_UpperCAmelCase :Tuple = False
_UpperCAmelCase :List[Any] = False
_UpperCAmelCase :Dict = False
_UpperCAmelCase :Any = False
_UpperCAmelCase :List[Any] = False
_UpperCAmelCase :int = False
_UpperCAmelCase :str = False
def UpperCAmelCase__ ( self : str ):
lowerCamelCase_ : Dict =DecisionTransformerModelTester(self )
lowerCamelCase_ : str =ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : int ):
lowerCamelCase_ : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
@slow
def UpperCAmelCase__ ( self : List[str] ):
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ : str =DecisionTransformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def UpperCAmelCase__ ( self : str ):
lowerCamelCase_ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : List[Any] =model_class(snake_case__ )
lowerCamelCase_ : int =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ : List[Any] =[*signature.parameters.keys()]
lowerCamelCase_ : List[str] =[
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ )
@require_torch
class lowercase__ ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : Any ):
lowerCamelCase_ : Optional[int] =2 # number of steps of autoregressive prediction we will perform
lowerCamelCase_ : int =10 # defined by the RL environment, may be normalized
lowerCamelCase_ : List[Any] =DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
lowerCamelCase_ : Union[str, Any] =model.to(snake_case__ )
lowerCamelCase_ : Any =model.config
torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] =torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ) # env.reset()
lowerCamelCase_ : Optional[Any] =torch.tensor(
[[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]] , device=snake_case__ )
lowerCamelCase_ : int =torch.tensor(snake_case__ , device=snake_case__ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
lowerCamelCase_ : str =state
lowerCamelCase_ : Optional[int] =torch.zeros(1 , 0 , config.act_dim , device=snake_case__ , dtype=torch.floataa )
lowerCamelCase_ : int =torch.zeros(1 , 0 , device=snake_case__ , dtype=torch.floataa )
lowerCamelCase_ : Tuple =torch.tensor(0 , device=snake_case__ , dtype=torch.long ).reshape(1 , 1 )
for step in range(snake_case__ ):
lowerCamelCase_ : str =torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=snake_case__ )] , dim=1 )
lowerCamelCase_ : Union[str, Any] =torch.cat([rewards, torch.zeros(1 , 1 , device=snake_case__ )] , dim=1 )
lowerCamelCase_ : Optional[int] =torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
lowerCamelCase_ : Dict =model(
states=snake_case__ , actions=snake_case__ , rewards=snake_case__ , returns_to_go=snake_case__ , timesteps=snake_case__ , attention_mask=snake_case__ , return_dict=snake_case__ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
lowerCamelCase_ : Optional[Any] =( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ),
1.0,
False,
{},
)
lowerCamelCase_ : str =action_pred[0, -1]
lowerCamelCase_ : Optional[int] =torch.cat([states, state] , dim=1 )
lowerCamelCase_ : Optional[Any] =returns_to_go[0, -1] - reward
lowerCamelCase_ : str =torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
lowerCamelCase_ : int =torch.cat(
[timesteps, torch.ones((1, 1) , device=snake_case__ , dtype=torch.long ) * (step + 1)] , dim=1 )
| 365 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
A__ : str = logging.get_logger(__name__)
class lowercase__ ( snake_case__ ):
def __init__( self : Optional[Any] , *snake_case__ : int , **snake_case__ : Any ):
warnings.warn(
"The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PerceiverImageProcessor instead." , snake_case__ , )
super().__init__(*snake_case__ , **snake_case__ )
| 209 | 0 |
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A ( _UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase = (KDPMaDiscreteScheduler,)
lowerCamelCase = 10
def snake_case__ ( self : Dict,**lowercase_ : Union[str, Any] )-> Any:
'''simple docstring'''
A__ = {
'num_train_timesteps': 1_1_0_0,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**lowercase_ )
return config
def snake_case__ ( self : List[str] )-> Union[str, Any]:
'''simple docstring'''
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def snake_case__ ( self : Any )-> List[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001],[0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowercase_,beta_end=lowercase_ )
def snake_case__ ( self : Union[str, Any] )-> Optional[int]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowercase_ )
def snake_case__ ( self : Union[str, Any] )-> Tuple:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def snake_case__ ( self : Union[str, Any] )-> Optional[Any]:
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(prediction_type='v_prediction' )
A__ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(self.num_inference_steps )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter * scheduler.init_noise_sigma
A__ = sample.to(lowercase_ )
for i, t in enumerate(scheduler.timesteps ):
A__ = scheduler.scale_model_input(lowercase_,lowercase_ )
A__ = model(lowercase_,lowercase_ )
A__ = scheduler.step(lowercase_,lowercase_,lowercase_ )
A__ = output.prev_sample
A__ = torch.sum(torch.abs(lowercase_ ) )
A__ = torch.mean(torch.abs(lowercase_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1_112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_428_650_170_972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0_002 ) < 1E-3
def snake_case__ ( self : Tuple )-> Tuple:
'''simple docstring'''
if torch_device == "mps":
return
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(self.num_inference_steps )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter * scheduler.init_noise_sigma
A__ = sample.to(lowercase_ )
for i, t in enumerate(scheduler.timesteps ):
A__ = scheduler.scale_model_input(lowercase_,lowercase_ )
A__ = model(lowercase_,lowercase_ )
A__ = scheduler.step(lowercase_,lowercase_,lowercase_ )
A__ = output.prev_sample
A__ = torch.sum(torch.abs(lowercase_ ) )
A__ = torch.mean(torch.abs(lowercase_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
def snake_case__ ( self : Optional[Any] )-> Any:
'''simple docstring'''
if torch_device == "mps":
return
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(self.num_inference_steps,device=lowercase_ )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter.to(lowercase_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
A__ = scheduler.scale_model_input(lowercase_,lowercase_ )
A__ = model(lowercase_,lowercase_ )
A__ = scheduler.step(lowercase_,lowercase_,lowercase_ )
A__ = output.prev_sample
A__ = torch.sum(torch.abs(lowercase_ ) )
A__ = torch.mean(torch.abs(lowercase_ ) )
if str(lowercase_ ).startswith('cpu' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
| 7 |
"""simple docstring"""
from __future__ import annotations
lowerCamelCase__ = list[tuple[int, int]]
lowerCamelCase__ = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowerCamelCase__ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : int = pos_x
__lowerCAmelCase : Optional[Any] = pos_y
__lowerCAmelCase : Optional[int] = (pos_y, pos_x)
__lowerCAmelCase : Union[str, Any] = goal_x
__lowerCAmelCase : Any = goal_y
__lowerCAmelCase : Optional[Any] = g_cost
__lowerCAmelCase : Any = parent
__lowerCAmelCase : Union[str, Any] = self.calculate_heuristic()
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = abs(self.pos_x - self.goal_x )
__lowerCAmelCase : str = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , _SCREAMING_SNAKE_CASE ):
return self.f_cost < other.f_cost
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = [self.start]
__lowerCAmelCase : list[Node] = []
__lowerCAmelCase : str = False
def __lowerCamelCase ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowerCAmelCase : Optional[int] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
__lowerCAmelCase : Union[str, Any] = True
return self.retrace_path(_SCREAMING_SNAKE_CASE )
self.closed_nodes.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = self.get_successors(_SCREAMING_SNAKE_CASE )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
# retrieve the best current path
__lowerCAmelCase : Optional[Any] = self.open_nodes.pop(self.open_nodes.index(_SCREAMING_SNAKE_CASE ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
if not self.reached:
return [self.start.pos]
return None
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = []
for action in delta:
__lowerCAmelCase : Optional[int] = parent.pos_x + action[1]
__lowerCAmelCase : Union[str, Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_SCREAMING_SNAKE_CASE ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _SCREAMING_SNAKE_CASE , ) )
return successors
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = node
__lowerCAmelCase : Optional[int] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowerCAmelCase : int = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
lowerCamelCase__ = (0, 0)
lowerCamelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
lowerCamelCase__ = GreedyBestFirst(init, goal)
lowerCamelCase__ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowerCamelCase__ = 2
for elem in grid:
print(elem) | 86 | 0 |
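# Editor's note (hedged): the search above is greedy best-first -- the open list is
# ordered purely by the Manhattan heuristic computed at construction time, while
# g_cost only decides between duplicate open nodes. A standalone check of the start
# node's heuristic:
def manhattan(pos: tuple[int, int], goal: tuple[int, int]) -> int:
    return abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])

assert manhattan((0, 0), (6, 6)) == 12  # start-to-goal heuristic on the 7x7 grid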
"""simple docstring"""
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class _SCREAMING_SNAKE_CASE( unittest.TestCase , A ):
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Tuple = load_tool('''text-to-speech''' )
self.tool.setup()
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE :Dict = self.tool('''hey''' )
__SCREAMING_SNAKE_CASE :Union[str, Any] = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] ,torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) ,) )
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE :Dict = self.tool('''hey''' )
__SCREAMING_SNAKE_CASE :Any = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] ,torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) ,) ) | 239 |
"""simple docstring"""
from torch import nn
class _SCREAMING_SNAKE_CASE( nn.Module ):
def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
super().__init__()
__SCREAMING_SNAKE_CASE :Tuple = class_size
__SCREAMING_SNAKE_CASE :str = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
__SCREAMING_SNAKE_CASE :Optional[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = self.mlp(SCREAMING_SNAKE_CASE__ )
return logits | 239 | 1 |
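# Editor's usage sketch (hedged, hypothetical sizes -- assumes the module above is a
# linear classification head mapping hidden states to class logits):
import torch
from torch import nn

head = nn.Linear(768, 5)            # embed_size -> class_size, like self.mlp above
logits = head(torch.randn(2, 768))  # (batch, embed_size) -> (batch, class_size)
assert logits.shape == (2, 5)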
"""simple docstring"""
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def UpperCamelCase_ ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowerCAmelCase_ : Any = cva.getAffineTransform(lowerCAmelCase_ , lowerCAmelCase_ )
return cva.warpAffine(lowerCAmelCase_ , lowerCAmelCase_ , (rows, cols) )
if __name__ == "__main__":
# read original image
lowercase__ : Any = cva.imread(
str(Path(__file__).resolve().parent.parent / """image_data""" / """lena.jpg""")
)
# turn image in gray scale value
lowercase__ : Optional[Any] = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
lowercase__ : Union[str, Any] = gray_img.shape
# set different points to rotate image
lowercase__ : Optional[Any] = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.floataa)
lowercase__ : List[str] = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.floataa)
lowercase__ : Optional[Any] = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.floataa)
lowercase__ : Tuple = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.floataa)
# add all rotated images in a list
lowercase__ : Union[str, Any] = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
lowercase__ : Any = plt.figure(1)
lowercase__ : Optional[int] = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, """gray""")
plt.title(titles[i])
plt.axis("""off""")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 224 |
"""simple docstring"""
def twos_complement(number: int) -> str:
    """
    Take in a negative integer `number` and return its two's complement
    representation as a binary string.
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
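    # Illustrative sanity checks (the values follow from the definition above):
    # twos_complement(-5) == "0b1011" and twos_complement(-1) == "0b11"
    print(twos_complement(-5))  # 0b1011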
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    """Wraps a ViT image processor and a CLIP tokenizer into a single CLIPSeg processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        # forward everything to the tokenizer's batch_decode
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        # forward everything to the tokenizer's decode
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
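# Usage sketch (illustrative, not part of the original module): in practice the
# processor is loaded from a pretrained checkpoint rather than built by hand:
#
#     from transformers import CLIPSegProcessor
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     inputs = processor(text=["a cat"], images=[pil_image], return_tensors="pt")
#
# where `pil_image` is a PIL.Image supplied by the caller.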
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
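# The `_LazyModule` indirection defers the submodule imports above until an
# attribute is first accessed. A rough sketch of the idea (simplified for
# illustration; the real implementation lives in `transformers.utils`):
#
#     class _LazyModule(ModuleType):
#         def __getattr__(self, name):
#             submodule = self._class_to_module[name]  # e.g. "OnnxConfig" -> "config"
#             module = importlib.import_module("." + submodule, self.__name__)
#             return getattr(module, name)
#
# so `from transformers.onnx import OnnxConfig` only pays the cost of importing
# `config.py` at that point.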
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
END_COMMON = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def rename_state_dict_key(k: str, patterns) -> str:
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}
    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
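# Example invocation (illustrative; the paths are placeholders):
#
#     python convert_bigbird_pegasus_tf_to_pytorch.py \
#         --tf_ckpt_path /path/to/bigbird_pegasus_ckpt \
#         --save_dir ./bigbird-pegasus-converted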
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowerCamelCase : Any = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowerCamelCase : Optional[Any] = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowerCamelCase : Optional[Any] = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )
    def _download_and_prepare(self, dl_manager):
        import nltk
        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            # newer NLTK versions expect pre-tokenized input
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
"""simple docstring"""
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
depth_first_search(
            [*possible_board, col], [*diagonal_right_collisions, row - col], [*diagonal_left_collisions, row + col], boards, n, )
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")
    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
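    # For n = 4 the search finds exactly two boards: the run above prints the
    # board [1, 3, 0, 2] rendered as ". Q . . / . . . Q / Q . . . / . . Q . ",
    # then its mirror image, and finally "2 solutions were found."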
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set the parameters of one layer
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias), )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias), )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias), )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias), )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings), )
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias), )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias), )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise the PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save the pytorch model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
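# Example invocation (illustrative; the paths are placeholders):
#
#     python convert_reformer_trax_checkpoint_to_pytorch.py \
#         --trax_model_pkl_path /path/to/model.pkl \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path ./pytorch_model.bin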
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_A = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))
    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output
def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0
        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)
            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)
            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
    def test_greedy_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)
    def test_sample_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = False
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = True
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.num_beams = 2
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
_A = {"""facebook/blenderbot-3B""": 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BlenderbotTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)
    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index)
    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        """ Please check that the tokenizer is not corrupted!"""
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""")
        return input_ids
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json',
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"
    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
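# Minimal usage sketch (illustrative, not part of the original module): the
# defaults mirror the "tiny" ConvNeXT V2 architecture family referenced above.
#
#     from transformers import ConvNextV2Config, ConvNextV2Model
#     configuration = ConvNextV2Config()
#     model = ConvNextV2Model(configuration)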
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def lowerCAmelCase ( ):
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(__UpperCamelCase )
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
__A = Path(__UpperCamelCase ) / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
init_hf_modules()
__A = Path(__UpperCamelCase ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
__A = dynamic_module_path / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f:
__A = f.read()
# Imports of the form `import .xxx`
__A = re.findall('''^\s*import\s+\.(\S+)\s*$''' , __UpperCamelCase , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' , __UpperCamelCase , flags=re.MULTILINE )
# Unique-ify
return list(set(__UpperCamelCase ) )
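
# A standalone check of the relative-import regexes above; the module names in
# the sample source are made up for the demonstration:
_sample = "from .unet import UNet\nfrom .scheduler import Scheduler\nimport torch"
_rel = re.findall(r"^\s*from\s+\.(\S+)\s+import", _sample, flags=re.MULTILINE)
assert sorted(_rel) == ["scheduler", "unet"]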
def get_relative_import_files(module_file):
    """Recursively resolve every file reachable through relative imports."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    """Check that all top-level imports of a file are importable, and return its relative imports."""
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """Import a module placed in the dynamic-module cache and return one of its classes."""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """Retrieve the single pipeline class in `loaded_module` that inherits from `DiffusionPipeline`."""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    """Download a module file (local path, GitHub community pipeline, or Hub repo) into the dynamic-module cache."""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Resolve, cache, and import a class defined in a dynamic module file."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
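
# Hedged usage sketch of the loader above; the repo id and file name are
# placeholders, not a real community pipeline:
# pipeline_class = get_class_from_dynamic_module("<user>/<repo>", "my_pipeline.py")
# pipe = pipeline_class(unet=..., scheduler=...)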
| 266 | 1 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
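
# Quick inline smoke test of the class above, run safely at import time:
_demo = LinkedList()
for _value in (1, 2, 3):
    _demo.insert_tail(_value)
assert repr(_demo) == "1->2->3"
_demo.reverse()
assert repr(_demo) == "3->2->1"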
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    # This section tests that the linked list supports any data type.
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
| 360 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    """Wraps an M-CTC-T feature extractor and a tokenizer into a single processor."""

    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
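
# Hedged usage sketch of the processor above; the checkpoint id is illustrative,
# and `waveform` would be a 1-D float array of 16 kHz audio:
# processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
# batch = processor(audio=waveform, sampling_rate=16000, text="a transcript", return_tensors="pt")
# batch["labels"] then holds the tokenized transcript, as wired up in __call__ above.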
| 114 | 0 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'sample_size': 32,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': 1000,
'block_out_channels': [32, 64],
'attention_head_dim': 8,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
IMAGENET_64_UNET_CONFIG = {
'sample_size': 64,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 3,
'num_class_embeds': 1000,
'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
LSUN_256_UNET_CONFIG = {
'sample_size': 256,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': None,
'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'default',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
def strabool(string):
    """Parse a boolean-ish CLI string."""
    if isinstance(string, bool):
        return string
    if string.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif string.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    """Map one OpenAI ResBlock onto the corresponding diffusers ResnetBlock2D keys."""
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    """Split the fused qkv projection of an OpenAI attention block into diffusers q/k/v keys."""
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
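
# A standalone illustration of the fused qkv split performed above; the shapes
# here are arbitrary, chosen only for the demonstration:
_qkv = torch.randn(3 * 4, 8, 1, 1)  # q, k and v stacked along dim 0, stored as 1x1 convs
_q, _k, _v = _qkv.chunk(3, dim=0)
assert _q.squeeze(-1).squeeze(-1).shape == (4, 8)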
def con_pt_to_diffuser(checkpoint_path, unet_config):
    """Map an OpenAI consistency-model UNet checkpoint onto diffusers UNet2DModel keys."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
)
parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = strabool(args.class_cond)
__UpperCAmelCase = os.path.basename(args.unet_path)
print(F'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
__UpperCAmelCase = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__UpperCAmelCase = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__UpperCAmelCase = TEST_UNET_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
__UpperCAmelCase = None
__UpperCAmelCase = con_pt_to_diffuser(args.unet_path, unet_config)
__UpperCAmelCase = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__UpperCAmelCase = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__UpperCAmelCase = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__UpperCAmelCase = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
__UpperCAmelCase = CMStochasticIterativeScheduler(**scheduler_config)
__UpperCAmelCase = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
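
# Example invocation; the script and checkpoint file names below are
# placeholders for whatever this file is saved as and whichever consistency
# checkpoint is being converted:
#   python convert_consistency_to_diffusers.py \
#       --unet_path cd_imagenet64_l2.pt --dump_path ./consistency-model --class_cond True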
| 29 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 52 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
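
# Example invocation; the script file name and dump path are placeholders:
#   python convert_swinv2_timm_to_pytorch.py \
#       --swinv2_name swinv2_tiny_patch4_window8_256 --pytorch_dump_folder_path ./swinv2-tiny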
| 365 |
from scipy.stats import spearmanr
import datasets
__lowerCamelCase : List[str] = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
__lowerCamelCase : List[Any] = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
__lowerCamelCase : Optional[int] = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 140 | 0 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1,
        attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1,
        initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19, num_conv_pos_embeddings=5, mask_time_prob=0.05,
        mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10,
        mask_feature_min_masks=0, ctc_loss_reduction="sum", ctc_zero_infinity=False,
        use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512,
        pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3,
        adapter_stride=2, num_adapter_layers=3, output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
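
# Quick check of the property above: the default conv strides (5, 2, 2, 2, 2, 2, 2)
# give an input-to-logits ratio of 5 * 2**6 = 320 audio samples per output frame.
assert math.prod((5, 2, 2, 2, 2, 2, 2)) == 320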
| 138 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
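
# Minimal inline smoke test of the classes above; no model is required and the
# sizes are arbitrary:
_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=5), MaxTimeCriteria(max_time=60.0)])
_ids = torch.zeros((1, 5), dtype=torch.long)
assert _criteria(_ids, None)  # length 5 >= max_length 5, so generation should stop
assert _criteria.max_length == 5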
| 138 | 1 |
'''simple docstring'''
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
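
# Worked example: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26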
if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
| 370 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 229 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["""MLukeTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 327 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 327 | 1 |
def odd_even_sort(input_list: list) -> list:
    """Sort in place with the odd-even transposition ("brick") sort and return the list."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
if __name__ == "__main__":
print("Enter list to be sorted")
UpperCamelCase_ = [int(x) for x in input().split()]
# inputing elements of the list in one line
UpperCamelCase_ = odd_even_sort(input_list)
print("The sorted list is")
print(sorted_list)
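# Example behaviour (illustrative):
#   odd_even_sort([5, 3, 8, 1, 2])  ->  [1, 2, 3, 5, 8]
# Like bubble sort this is O(n^2), but the compare-exchange pairs within each
# even pass (and each odd pass) are independent of one another, which is why
# odd-even transposition sort parallelises naturally.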
| 363 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(
            image_processor=image_processor, tokenizer=tokenizer, qformer_tokenizer=qformer_tokenizer
        )

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
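# The tokenizer assertions above depend on the processor prefixing the
# Q-Former tokenizer's outputs with "qformer_". A minimal sketch of that
# merging step (illustrative, not the transformers implementation):
def _merge_encodings_sketch(text_encoding: dict, qformer_encoding: dict) -> dict:
    merged = dict(text_encoding)
    for key, value in qformer_encoding.items():
        merged["qformer_" + key] = value  # e.g. qformer_input_ids
    return merged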
| 344 | 0 |
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0):
        """Store a default key; 0 means "fall back to 1" at call time."""
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """Encrypt `content` character-by-character, returning a list of chars."""
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        """Decrypt `content` character-by-character, returning a list of chars."""
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        """Encrypt `content` and return the result as a single string."""
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        """Decrypt `content` and return the result as a single string."""
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        """Encrypt `file` line by line into 'encrypt.out'; return success."""
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        """Decrypt `file` line by line into 'decrypt.out'; return success."""
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False

        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 24 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        """Return the item located at `_loader_batch_index` within the current `_loader_batch_data`."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # Subiterator is None when we haven't started a `preprocess` iterator, so start it.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator's unrolling, but items are
        # regrouped into lists until an `is_last` marker is seen, so that
        # `postprocess` sees the same boundaries that `preprocess` produced.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 257 | 0 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
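# A minimal `run_command` compatible with the calls above might look like the
# sketch below; it stays commented out because the real helper is imported
# from accelerate.test_utils.testing.
#
# import subprocess
#
# def run_command(command, return_stdout=False):
#     result = subprocess.run(command, check=True, capture_output=True, text=True)
#     if return_stdout:
#         return result.stdout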
| 169 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
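# Example invocation (paths illustrative):
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./reformer_weights.pkl \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin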
| 169 | 1 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
    args = parser.parse_args()
main(args)
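# The "l0" branch implements the hard-concrete stretch-and-clip mask:
# s = sigmoid(scores); s_bar = s * (r - l) + l with (l, r) = (-0.1, 1.1);
# mask = clamp(s_bar, 0, 1). Scores far below 0 give an exact 0 (weight fully
# pruned) and scores far above 0 give an exact 1 (weight fully kept), e.g.
# scores = -6 -> s ~= 0.0025 -> s_bar ~= -0.097 -> mask = 0.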
| 14 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        num_channels: int = 3,
        dim: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        embed_dim: int = 128,
        depths=[2, 2, 2, 2],
        resolution: int = 2,
        mlp_expansion_ratio: int = 2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            resolution=self.resolution,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            dim=self.dim,
            mlp_expansion_ratio=self.mlp_expansion_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]), [seq_length, self.model_tester.hidden_size]
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size]
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)

        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )

    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)

            self.assertTrue(outputs_dict is not None)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 14 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
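# Illustrative usage (column names assumed): aligning the template against a
# dataset whose audio/transcript columns use different names.
# task = AutomaticSpeechRecognition(audio_column="file", transcription_column="sentence")
# dataset = dataset.prepare_for_task(task)  # casts columns to the task schema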
| 361 |
from math import ceil, sqrt
def solution(limit: int = 1000000) -> int:
    """Project Euler 173: count hollow square laminae built from up to `limit` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
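# Why the closed form works: a lamina with outer side n and hole side m (same
# parity, 1 <= m <= n - 2) uses n**2 - m**2 tiles, so for each n the valid hole
# sizes form an arithmetic run of step 2 starting at `hole_width_lower_bound`;
# `(outer_width - hole_width_lower_bound - 2) // 2 + 1` counts that run.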
| 29 | 0 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    """Constructs a TVLT audio feature extractor for preparing audios for the model."""

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2_048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44_100,
        hop_length_to_sampling_rate=86,
        n_fft=2_048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=2_2050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, 0, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
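# Illustrative usage on a dummy waveform (assumes 44.1 kHz mono input):
if __name__ == "__main__":
    extractor = TvltFeatureExtractor()
    dummy_audio = np.random.randn(44_100).astype(np.float32)  # one second of noise
    features = extractor(dummy_audio, sampling_rate=44_100, return_tensors="np")
    print(features["audio_values"].shape, features["audio_mask"].shape)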
| 118 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class A__ ( _snake_case , unittest.TestCase ):
lowercase = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def snake_case_ ( self , UpperCamelCase__=0 ) -> Tuple:
'''simple docstring'''
A_ = np.random.RandomState(UpperCamelCase__ )
A_ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
    def test_pipeline_default_ddim( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_pipeline_pndm( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_pipeline_lms( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_pipeline_euler( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_pipeline_euler_ancestral( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_pipeline_dpm_multistep( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_stable_diffusion_prompt_embeds( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = pipe(**inputs )
        image_slice_1 = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt" )]
        text_inputs = pipe.tokenizer(
            prompt , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=True , return_tensors="np" , )
        text_inputs = text_inputs["input_ids"]
        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0]
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = pipe(**inputs )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
    def test_stable_diffusion_negative_prompt_embeds( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = pipe(**inputs )
        image_slice_1 = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt" )]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=True , return_tensors="np" , )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0] )
        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds
        # forward
        output = pipe(**inputs )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class A__ ( unittest.TestCase ):
    @property
    def gpu_provider( self ):
        '''simple docstring'''
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options( self ):
        '''simple docstring'''
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self ):
        '''simple docstring'''
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0 )
        output = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="np" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_inference_ddim( self ):
        '''simple docstring'''
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=ddim_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = "open neural network exchange"
        generator = np.random.RandomState(0 )
        output = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="np" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_inference_k_lms( self ):
        '''simple docstring'''
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = "open neural network exchange"
        generator = np.random.RandomState(0 )
        output = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="np" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_intermediate_state( self ):
        '''simple docstring'''
        number_of_steps = 0
        def test_callback_fn(step: int , timestep: int , latents: np.ndarray ) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
        test_callback_fn.has_been_called = False
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = "Andromeda galaxy in a bottle"
        generator = np.random.RandomState(0 )
        pipe(
            prompt=prompt , num_inference_steps=5 , guidance_scale=7.5 , generator=generator , callback=test_callback_fn , callback_steps=1 , )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6
    def test_stable_diffusion_no_safety_checker( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        assert isinstance(pipe , OnnxStableDiffusionPipeline )
        assert pipe.safety_checker is None
        image = pipe("example prompt" , num_inference_steps=2 ).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname )
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt" , num_inference_steps=2 ).images[0]
        assert image is not None
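# A minimal, self-contained sketch (added for illustration, not part of the test
# suite) of the slice-comparison pattern used throughout the tests above: take a
# small corner of the output image and compare it to a reference within an
# absolute tolerance. The arrays here are dummies chosen so the check passes.
import numpy as _np
_image = _np.zeros((1, 128, 128, 3))
_image_slice = _image[0, -3:, -3:, -1]
_expected_slice = _np.zeros(9)
assert _np.abs(_image_slice.flatten() - _expected_slice).max() < 1e-2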
| 162 | 0 |
"""simple docstring"""
def factorial( digit: int ) -> int:
    '''simple docstring'''
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def krishnamurthy( number: int ) -> bool:
    '''simple docstring'''
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate , digit = divmod(duplicate , 10 )
        fact_sum += factorial(digit )
    return fact_sum == number
if __name__ == "__main__":
print("Program to check whether a number is a Krisnamurthy Number or not.")
_a : int= int(input("Enter number: ").strip())
print(
f'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.'''
)
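# Quick sanity check (added for illustration, not in the original script):
# 145 is a Krishnamurthy (strong) number because 1! + 4! + 5! = 1 + 24 + 120 = 145.
# The only base-10 examples are 1, 2, 145 and 40585.
assert krishnamurthy(145)
assert not krishnamurthy(144)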
| 364 | """simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
_a : int= "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
_a : Dict= BASE_URL + "/user"
# https://github.com/settings/tokens
_a : Union[str, Any]= os.environ.get("USER_TOKEN", "")
def __UpperCAmelCase ( UpperCAmelCase_ : str ) -> dict[Any, Any]:
'''simple docstring'''
__snake_case : Tuple = {
'Authorization': F"token {auth_token}",
'Accept': 'application/vnd.github.v3+json',
}
return requests.get(UpperCAmelCase_ , headers=UpperCAmelCase_ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError("'USER_TOKEN' field cannot be empty.")
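# Request shape for reference (added note; the token below is a placeholder):
#   fetch_github_info("ghp_XXXX") sends
#     {"Authorization": "token ghp_XXXX", "Accept": "application/vnd.github.v3+json"}
#   to https://api.github.com/user and returns the decoded JSON user profile.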
| 95 | 0 |
def remove_duplicates( key: str ) -> str:
    '''simple docstring'''
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups
def create_cipher_map( key: str ) -> dict[str, str]:
    '''simple docstring'''
    alphabet = [chr(i + 65 ) for i in range(26 )]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper() )
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet ), 26 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher( message: str, cipher_map: dict[str, str] ) -> str:
    '''simple docstring'''
    return "".join(cipher_map.get(ch, ch ) for ch in message.upper() )
def decipher( message: str, cipher_map: dict[str, str] ) -> str:
    '''simple docstring'''
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch ) for ch in message.upper() )
def main() -> None:
    '''simple docstring'''
    message = input("Enter message to encode or decode: " ).strip()
    key = input("Enter keyword: " ).strip()
    option = input("Encipher or decipher? E/D:" ).strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option" )
    cipher_map = create_cipher_map(key )
    print(func(message, cipher_map ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
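# Round-trip sanity check (added for illustration): enciphering and then
# deciphering with the same keyword map recovers the original (uppercased)
# message; non-alphabetic characters such as spaces pass through unchanged.
_cm = create_cipher_map("college")
assert decipher(encipher("HELLO WORLD", _cm), _cm) == "HELLO WORLD"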
| 65 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_( state_dict ) -> None:
    '''simple docstring'''
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None )
def make_linear_from_emb( emb ) -> nn.Linear:
    '''simple docstring'''
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk( checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False ) -> MBartForConditionalGeneration:
    '''simple docstring'''
    state_dict = torch.load(checkpoint_path, map_location="cpu" )["model"]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size )
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is an mBART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
)
model.save_pretrained(args.pytorch_dump_folder_path)
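# Example invocation (added note; paths and the script filename are placeholders):
#   python convert_mbart_checkpoint.py /path/to/fairseq/model.pt ./mbart-hf \
#       --hf_config facebook/mbart-large-cc25 --finetuned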
| 65 | 1 |
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")
class DisjointSetTreeNode(Generic[T] ):
    # Disjoint-set node storing the element, its parent pointer and its rank
    def __init__( self , data: T ):
        self.data = data
        self.parent = self
        self.rank = 0
class DisjointSetTree(Generic[T] ):
    # Disjoint-set (union-find) data structure
    def __init__( self ):
        # map from node name to the node object
        self.map = {}
    def make_set( self , data: T ):
        # create a new singleton set containing `data`
        self.map[data] = DisjointSetTreeNode(data )
    def find_set( self , data: T ) -> DisjointSetTreeNode[T]:
        # find the set representative, compressing the path along the way
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data )
        return elem_ref.parent
    def link( self , node1: DisjointSetTreeNode[T] , node2: DisjointSetTreeNode[T] ):
        # union-by-rank helper
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1
    def union( self , data1: T , data2: T ):
        # merge the two disjoint sets containing data1 and data2
        self.link(self.find_set(data1 ) , self.find_set(data2 ) )
class GraphUndirectedWeighted(Generic[T] ):
    def __init__( self ):
        # map from each node to its neighbours and edge weights
        self.connections = {}
    def add_node( self , node: T ):
        # add a node ONLY if it is not already present in the graph
        if node not in self.connections:
            self.connections[node] = {}
    def add_edge( self , node1: T , node2: T , weight: int ):
        # add an undirected edge with the given weight
        self.add_node(node1 )
        self.add_node(node2 )
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
    def kruskal( self ) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm: sort edges by weight, then greedily keep the
        # cheapest edge that connects two previously disconnected components.
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start) )
                    edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node )
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections ) - 1:
            u , v , w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u )
            parent_v = disjoint_set.find_set(v )
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u , v , w )
                disjoint_set.union(u , v )
        return graph
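# Small usage sketch (added for illustration): build a triangle graph and take
# its minimum spanning tree; Kruskal keeps the two cheapest edges and drops the
# weight-10 one.
_g = GraphUndirectedWeighted[int]()
_g.add_edge(1, 2, 1)
_g.add_edge(2, 3, 2)
_g.add_edge(1, 3, 10)
_mst = _g.kruskal()
assert 3 not in _mst.connections[1]  # the expensive edge was dropped
assert _mst.connections[1][2] == 1 and _mst.connections[2][3] == 2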
| 354 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'xlnet-base-cased': None,
    'xlnet-large-cased': None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class lowerCamelCase__ ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = 'left'
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        return len(self.sp_model )
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD" , outputs )
            outputs = "".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text ):
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ):
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def _decode( self , token_ids , skip_special_tokens=False , clean_up_tokenization_spaces=None , spaces_between_special_tokens=True , **kwargs , ):
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer" , False )
        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
                    current_sub_text = []
                sub_texts.append(token )
            else:
                current_sub_text.append(token )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts )
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text )
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory , filename_prefix=None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
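# Usage sketch (added note): the class above is the XLNet SentencePiece
# tokenizer; loading a pretrained vocab requires network access, and the exact
# pieces depend on the SentencePiece model:
#   tokenizer = <tokenizer class>.from_pretrained("xlnet-base-cased")
#   tokenizer.tokenize("Hello world")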
| 42 | 0 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
a__ : Dict = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
a__ : List[Any] = '''main'''
# Default branch name
a__ : Dict = '''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'''
# One particular commit (not the top of `main`)
a__ : List[Any] = '''aaaaaaa'''
# This commit does not exist, so we should 404.
a__ : List[Any] = '''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684'''
# Sha-1 of config.json on the top of `main`, for checking purposes
a__ : Dict = '''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'''
@contextlib.contextmanager
def UpperCAmelCase_( ):
"""simple docstring"""
print('''Welcome!''' )
yield
print('''Bye!''' )
@contextlib.contextmanager
def UpperCAmelCase_( ):
"""simple docstring"""
print('''Bonjour!''' )
yield
print('''Au revoir!''' )
class a_ ( unittest.TestCase ):
"""simple docstring"""
    def test_module_spec_available( self ) ->Any:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec('''transformers''' ) is not None
class a_ ( unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
    def test_context_managers_no_context( self , mock_stdout ) ->Any:
with ContextManagers([] ):
print('''Transformers are awesome!''' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , '''Transformers are awesome!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
    def test_context_managers_one_context( self , mock_stdout ) ->Union[str, Any]:
with ContextManagers([context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Welcome!\nTransformers are awesome!\nBye!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
    def test_context_managers_two_context( self , mock_stdout ) ->Tuple:
with ContextManagers([context_fr(), context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''' )
    @require_torch
    def test_find_labels_pt( self ) ->Tuple:
        self.assertEqual(find_labels(BertForSequenceClassification ) , ['labels'] )
        self.assertEqual(find_labels(BertForPreTraining ) , ['labels', 'next_sentence_label'] )
        self.assertEqual(find_labels(BertForQuestionAnswering ) , ['start_positions', 'end_positions'] )
        class DummyModel(BertForSequenceClassification ):
            pass
        self.assertEqual(find_labels(DummyModel ) , ['labels'] )
    @require_tf
    def test_find_labels_tf( self ) ->Any:
        self.assertEqual(find_labels(TFBertForSequenceClassification ) , ['labels'] )
        self.assertEqual(find_labels(TFBertForPreTraining ) , ['labels', 'next_sentence_label'] )
        self.assertEqual(find_labels(TFBertForQuestionAnswering ) , ['start_positions', 'end_positions'] )
        class DummyModel(TFBertForSequenceClassification ):
            pass
        self.assertEqual(find_labels(DummyModel ) , ['labels'] )
    @require_flax
    def test_find_labels_flax( self ) ->str:
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification ) , [] )
        self.assertEqual(find_labels(FlaxBertForPreTraining ) , [] )
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering ) , [] )
        class DummyModel(FlaxBertForSequenceClassification ):
            pass
        self.assertEqual(find_labels(DummyModel ) , [] )
| 313 |
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC ):
    """simple docstring"""
    def __init__( self ):
        # test for the above condition
        self.test()
    def test( self ):
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance ):
                raise Exception(
                    'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
            stepped , completed , reset = self.update(advance )
            counter += 1
            if counter > 1_0000:
                raise Exception('update() does not fulfill the constraint.' )
        if self.remaining() != 0:
            raise Exception('Custom Constraint is not defined correctly.' )
    @abstractmethod
    def advance( self ):
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def does_advance( self , token_id ):
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def update( self , token_id ):
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def reset( self ):
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def remaining( self ):
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def copy( self , stateful=False ):
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class PhrasalConstraint(Constraint ):
    """simple docstring"""
    def __init__( self , token_ids ):
        super(PhrasalConstraint , self ).__init__()
        if not isinstance(token_ids , list ) or len(token_ids ) == 0:
            raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
        if any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids ):
            raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids )
        self.fulfilled_idx = -1 # the index of the currently fulfilled step
        self.completed = False
def __lowerCAmelCase ( self ) ->List[Any]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : List[Any] = False
if self.does_advance(_lowerCamelCase ):
self.fulfilled_idx += 1
SCREAMING_SNAKE_CASE : str = True
if self.fulfilled_idx == (self.seqlen - 1):
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Union[str, Any] = completed
else:
# failed to make progress.
SCREAMING_SNAKE_CASE : Dict = True
self.reset()
return stepped, completed, reset
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
def __lowerCAmelCase ( self ) ->Any:
return self.seqlen - (self.fulfilled_idx + 1)
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Dict:
SCREAMING_SNAKE_CASE : Any = PhrasalConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : Dict = self.seqlen
SCREAMING_SNAKE_CASE : int = self.fulfilled_idx
SCREAMING_SNAKE_CASE : Tuple = self.completed
return new_constraint
class DisjunctiveTrie:
    """simple docstring"""
    def __init__( self , nested_token_ids , no_subsets=True ):
        self.max_height = max([len(one ) for one in nested_token_ids] )
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids ):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root , nested_token_ids ):
            raise ValueError(
                'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
                F""" {nested_token_ids}.""" )
        self.trie = root
    def next_tokens( self , current_seq ):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys() )
        return next_tokens
    def reached_leaf( self , current_seq ):
        next_tokens = self.next_tokens(current_seq )
        return len(next_tokens ) == 0
    def count_leaves( self , root ):
        next_nodes = list(root.values() )
        if len(next_nodes ) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn ) for nn in next_nodes] )
    def has_subsets( self , trie , nested_token_ids ):
        leaf_count = self.count_leaves(trie )
        return len(nested_token_ids ) != leaf_count
class DisjunctiveConstraint(Constraint ):
    """simple docstring"""
    def __init__( self , nested_token_ids ):
        super(DisjunctiveConstraint , self ).__init__()
        if not isinstance(nested_token_ids , list ) or len(nested_token_ids ) == 0:
            raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
        if any(not isinstance(token_ids , list ) for token_ids in nested_token_ids ):
            raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
        if any(
            any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids )
            for token_ids in nested_token_ids ):
            raise ValueError(
                F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
        self.trie = DisjunctiveTrie(nested_token_ids )
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : str = self.trie.next_tokens(self.current_seq )
if len(_lowerCamelCase ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : List[str] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Any:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
if self.does_advance(_lowerCamelCase ):
self.current_seq.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = True
else:
SCREAMING_SNAKE_CASE : Dict = True
self.reset()
SCREAMING_SNAKE_CASE : Any = self.trie.reached_leaf(self.current_seq )
SCREAMING_SNAKE_CASE : List[Any] = completed
return stepped, completed, reset
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : List[Any] = []
def __lowerCAmelCase ( self ) ->Optional[Any]:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->List[str]:
SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : str = self.seqlen
SCREAMING_SNAKE_CASE : int = self.current_seq
SCREAMING_SNAKE_CASE : Optional[int] = self.completed
return new_constraint
class ConstraintListState:
    """simple docstring"""
    def __init__( self , constraints ):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints] )
        self.n_constraints = len(constraints )
        self.completed = False
        self.init_state()
    def init_state( self ):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False ) for constraint in self.constraints]
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : str = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Tuple = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
SCREAMING_SNAKE_CASE : Optional[int] = constraint.advance()
if isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.append(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.extend(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : List[str] = self.inprogress_constraint.advance()
if isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.append(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.extend(_lowerCamelCase )
if len(_lowerCamelCase ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.add(_lowerCamelCase )
# the entire list of constraints are fulfilled
if self.completed:
break
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` should be an `int`, but is `{token_id}`.""" )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = False, False
if self.completed:
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : Optional[int] = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.inprogress_constraint.update(_lowerCamelCase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[int] = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
SCREAMING_SNAKE_CASE : str = None
if len(self.pending_constraints ) == 0:
# we're done!
SCREAMING_SNAKE_CASE : Optional[Any] = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(_lowerCamelCase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = pending_constraint.update(_lowerCamelCase )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = None
if not complete and stepped:
SCREAMING_SNAKE_CASE : Optional[Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
SCREAMING_SNAKE_CASE : Union[str, Any] = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
SCREAMING_SNAKE_CASE : str = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
    def copy( self , stateful=True ):
        new_state = ConstraintListState(self.constraints )  # we actually never touch self.constraints objects
        # throughout this process. So it's at initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True ) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True )
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
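# A small sketch (added for illustration) of how DisjunctiveTrie resolves
# branching candidate sequences; the token ids below are arbitrary:
_trie = DisjunctiveTrie([[1, 2, 3], [1, 2, 4]])
assert sorted(_trie.next_tokens([1, 2])) == [3, 4]  # both branches remain legal
assert _trie.reached_leaf([1, 2, 3])  # consuming a full candidate hits a leaf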
| 313 | 1 |
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial( n: int ) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n ) )
def solution() -> int:
    limit = 7 * factorial(9 ) + 1
    return sum(i for i in range(3 , limit ) if sum_of_digit_factorial(i ) == i )
if __name__ == "__main__":
print(F"""{solution() = }""")
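# Why 7 * 9! + 1 bounds the search (added reasoning note): an 8-digit number is
# at least 10_000_000, but eight digits can contribute at most 8 * 9! = 2_903_040
# to the digit-factorial sum, so no number with 8 or more digits can equal its
# own digit-factorial sum.
assert 8 * factorial(9) < 10_000_000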
| 168 |
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job( job ) -> dict:
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start )
    end_datetime = date_parser.parse(end )
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0 )
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info
def get_job_time( workflow_run_id , token=None ) -> dict:
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F'Bearer {token}'}
    url = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
    result = requests.get(url , headers=headers ).json()
    job_time = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(job ) for job in result["jobs"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'&page={i + 2}' , headers=headers ).json()
            job_time.update({job["name"]: extract_time_from_single_job(job ) for job in result["jobs"]} )
        return job_time
except Exception:
print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F"""{k}: {v['duration']}""")
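# Minimal illustration (added) of the duration math used above; the timestamps
# are made up:
_start = date_parser.parse("2023-01-01T00:00:00Z")
_end = date_parser.parse("2023-01-01T00:42:20Z")
assert round((_end - _start).total_seconds() / 60.0) == 42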
| 168 | 1 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def test_hf_hub_url( repo_id , path , revision ) -> None:
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path )}"
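# quote() is what makes the blank-containing filename above URL-safe (added
# illustration):
assert quote("filename with blanks.csv") == "filename%20with%20blanks.csv"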
| 338 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase__ : Any = logging.get_logger(__name__)
class UpperCAmelCase ( BackboneConfigMixin , PretrainedConfig ):
    '''simple docstring'''
    model_type = 'maskformer-swin'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self , image_size=2_2_4 , patch_size=4 , num_channels=3 , embed_dim=9_6 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , out_features=None , out_indices=None , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ['stem'] + [F"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
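    # Usage sketch (added note): with the defaults above, the derived hidden
    # size is embed_dim * 2 ** (len(depths) - 1) = 96 * 8 = 768. Published in
    # transformers as MaskFormerSwinConfig:
    #   config = <config class>(out_features=["stage1", "stage4"])
    #   config.hidden_size  # 768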
| 121 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp( self ):
"""simple docstring"""
super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
return input_text, output_text
    def test_full_tokenizer( self ):
"""simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
def snake_case ( self ):
"""simple docstring"""
pass
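    # Note (added): the expected ids in the test above simply index into the toy
    # vocab built in setUp: 'un' -> 7, '##want' -> 4, '##ed' -> 5, ',' -> 10,
    # 'runn' -> 8, '##ing' -> 9.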
| 363 | """simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = BioGptModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        """simple docstring"""
        model = BioGptForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_biogpt_model_attention_mask_past( self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args ):
"""simple docstring"""
snake_case = BioGptModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
# create attention mask
snake_case = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase )
snake_case = self.seq_length // 2
snake_case = 0
# first forward pass
snake_case ,snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase ).to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
snake_case = ids_tensor((1,) , lowerCAmelCase ).item() + 1
snake_case = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
snake_case = random_other_next_tokens
# append to next input_ids and attn_mask
snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowerCAmelCase )] , dim=1 , )
# get two different outputs
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state']
snake_case = model(lowerCAmelCase , past_key_values=lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state']
# select random slice
snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) )
    def create_and_check_biogpt_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args ):
"""simple docstring"""
snake_case = BioGptModel(config=lowerCAmelCase ).to(lowerCAmelCase ).eval()
snake_case = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase )
# first forward pass
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , use_cache=lowerCAmelCase )
snake_case ,snake_case = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state']
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase )[
'last_hidden_state'
]
# select random slice
snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) )
    def create_and_check_forward_and_backwards( self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args , gradient_checkpointing=False ):
"""simple docstring"""
snake_case = BioGptForCausalLM(lowerCAmelCase )
model.to(lowerCAmelCase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
snake_case = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
    def create_and_check_biogpt_weight_initialization( self , config , *args ):
"""simple docstring"""
snake_case = BioGptModel(lowerCAmelCase )
snake_case = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(lowerCAmelCase)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            """feature-extraction""": BioGptModel,
            """text-classification""": BioGptForSequenceClassification,
            """text-generation""": BioGptForCausalLM,
            """token-classification""": BioGptForTokenClassification,
            """zero-shot""": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_biogpt_model_attention_mask_past( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)
    def test_biogpt_gradient_checkpointing( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)
    def test_biogpt_model_past_with_large_inputs( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)
    def test_biogpt_weight_initialization( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)
    def test_biogpt_token_classification_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
@slow
    def test_batch_generation( self ):
        """simple docstring"""
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
        model.to(lowerCAmelCase)
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt')
        tokenizer.padding_side = 'left'
        # use the EOS token as the PAD token
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id
        # use different length sentences to test batching
        sentences = [
            'Hello, my dog is a little',
            'Today, I',
        ]
        inputs = tokenizer(sentences, return_tensors='pt', padding=True)
        input_ids = inputs['input_ids'].to(lowerCAmelCase)
        outputs = model.generate(
            input_ids=input_ids, attention_mask=inputs['attention_mask'].to(lowerCAmelCase), )
        inputs_non_padded = tokenizer(sentences[0], return_tensors='pt').input_ids.to(lowerCAmelCase)
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        num_paddings = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors='pt').input_ids.to(lowerCAmelCase)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            'Hello, my dog is a little bit bigger than a little bit.',
            'Today, I have a good idea of how to use the information',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_biogpt_sequence_classification_model( self ):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(lowerCAmelCase)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(lowerCAmelCase)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_biogpt_sequence_classification_model_for_multi_label( self ):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(lowerCAmelCase)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(lowerCAmelCase)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_lm_head_model( self ):
        """simple docstring"""
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
        input_ids = torch.tensor([[2, 48_05, 9, 6_56, 21]])
        output = model(input_ids)[0]
        vocab_size = 4_23_84
        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
[[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
@slow
    def test_biogpt_generation( self ):
        """simple docstring"""
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt')
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
        model.to(lowerCAmelCase)
        torch.manual_seed(0)
        tokenized = tokenizer('COVID-19 is', return_tensors='pt').to(lowerCAmelCase)
        output_ids = model.generate(
            **tokenized, min_length=1_00, max_length=10_24, num_beams=5, early_stopping=True, )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        expected_output_str = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
        self.assertEqual(output_str, expected_output_str)
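    # Note (added for illustration, not part of the original test): the pinned
    # output above comes from beam search; dropping `num_beams` and
    # `early_stopping` would exercise greedy decoding instead, e.g.
    #   model.generate(**tokenized, max_length=50)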
| 149 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim, depths=depths, focal_levels=focal_levels, focal_windows=focal_windows, use_conv_embed=use_conv_embed, id2label=id2label, label2id=label2id, use_post_layernorm=use_post_layernorm, use_layerscale=use_layerscale, )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()
    # load state dict
    model.load_state_dict(state_dict)
    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True, size={"shortest_edge": 256}, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=224, do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ] )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1E-4)
    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2_166, -0.4_368, 0.2_191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1_669, 0.0_125, -0.1_695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4_917, -0.0_430, 0.1_341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2_588, -0.5_342, -0.2_331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1_655, -0.4_090, -0.1_730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5_306, -0.0_483, -0.3_928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"""Pushing model and processor of {model_name} to the hub...""")
        model.push_to_hub(f"""{model_name}""")
        processor.push_to_hub(f"""{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
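# Example invocation (illustrative; the script filename is an assumption):
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub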
| 301 |
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes are not completed, a process whose arrival time has
    # passed and that still has remaining execution time is put into
    # ready_process. The shortest job in ready_process, target_process, is
    # then executed to completion.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
| 301 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :List[str] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Any = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 361 |
'''simple docstring'''
def reverse_words(input_str: str) -> str:
    '''
    Reverses the order of the words in the given string.
    >>> reverse_words("Hello World")
    'World Hello'
    '''
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 135 | 0 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    """simple docstring"""
    headers = {"""Content-Type""": """application/json"""}
    response = requests.post(slack_url, json={"""text""": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            """Request to slack returned an error """
            f'''{response.status_code}, the response is:\n{response.text}'''
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 116 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_000_000) -> int:
    """simple docstring"""
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
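# Quick check of the counting identity the search relies on (added for
# illustration): an m x n grid contains T(m) * T(n) rectangles, where
# T(k) = k * (k + 1) / 2 is the k-th triangle number. The 3 x 2 grid from the
# problem statement therefore contains 6 * 3 = 18 rectangles.
def rectangle_count(m: int, n: int) -> int:
    return (m * (m + 1) // 2) * (n * (n + 1) // 2)
assert rectangle_count(3, 2) == 18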
if __name__ == "__main__":
print(F"""{solution() = }""")
| 116 | 1 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    def __init__( self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)
    def forward( self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, controlnet_cond: List[torch.tensor], conditioning_scale: List[float], class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guess_mode: bool = False, return_dict: bool = True, ):
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample, timestep, encoder_hidden_states, image, scale, class_labels, timestep_cond, attention_mask, cross_attention_kwargs, guess_mode, return_dict, )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample
    def save_pretrained( self, save_directory: Union[str, os.PathLike], is_main_process: bool = True, save_function: Callable = None, safe_serialization: bool = False, variant: Optional[str] = None, ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant, )
            idx += 1
            model_path_to_save = model_path_to_save + F'''_{idx}'''
    @classmethod
    def from_pretrained( cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs ):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + F'''_{idx}'''
        logger.info(F'''{len(controlnets)} controlnets loaded from {pretrained_model_path}.''')
        if len(controlnets) == 0:
            raise ValueError(
                F'''No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + "_0"}.''')
        return cls(controlnets)
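# Hypothetical usage sketch (added for illustration; the checkpoint names are
# assumptions, not taken from this file): two ControlNets can be wrapped and
# weighted through the per-net conditioning scales passed to `forward`.
#
#   controlnet_canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   controlnet_pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
#   multi = MultiControlNetModel([controlnet_canny, controlnet_pose])
#   multi.save_pretrained("./my-multi-controlnet")  # writes ./my-multi-controlnet and ./my-multi-controlnet_1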
| 98 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    '''simple docstring'''
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list[str]:
    '''simple docstring'''
    solutions = []
    den = 11
    last_digit = int('''1''' + '''0''' * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(F'''{num}/{den}''')
            den += 1
        num += 1
        den = 10
    return solutions
def solution(n: int = 2) -> int:
    '''simple docstring'''
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
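    # Sanity check (Project Euler 33, added for illustration): the four
    # non-trivial curious fractions are 16/64, 19/95, 26/65 and 49/98; their
    # product reduces to 1/100, so the value printed above should be 100.
    assert solution() == 100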
| 98 | 1 |
'''simple docstring'''
def solution(n: int = 1_000) -> int:
    """simple docstring"""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 2 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ : List[Any] = TypeVar("T")
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
return (position - 1) // 2
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
return (2 * position) + 1
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    # Minimum-heap priority queue that also tracks each element's heap position
    # so that keys can be updated in O(log n).
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0
    def __len__(self) -> int:
        return self.elements
    def __repr__(self) -> str:
        return str(self.heap)
    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0
    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)
    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem
    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)
    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None
    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None
    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0
    def __repr__(self) -> str:
        return str(self.connections)
    def __len__(self) -> int:
        return self.nodes
    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1
    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
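if __name__ == "__main__":
    # Small worked example (added for illustration; not part of the original
    # module): on this weighted triangle the parent map links both b and c
    # back through the cheap edges a-b (3) and a-c (5), never b-c (10).
    g = GraphUndirectedWeighted[str]()
    g.add_edge("a", "b", 3)
    g.add_edge("b", "c", 10)
    g.add_edge("c", "a", 5)
    dist, parent = prims_algo(g)
    print(dist)
    print(parent)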
| 270 | 0 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = F'''The elements inside the sequence must contain only {colors} values'''
            raise ValueError(msg)
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by commas:\n''').strip()
    unsorted = [int(item.strip()) for item in user_input.split(''',''')]
    print(F"""{dutch_national_flag_sort(unsorted)}""")
| 36 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase_ ( ProcessorMixin ):
    '''simple docstring'''
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'
    def __init__( self, image_processor, tokenizer ):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__( self, text=None, images=None, return_tensors=None, **kwargs ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode( self, *args, **kwargs ):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode( self, *args, **kwargs ):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names( self ):
        return ["input_ids", "attention_mask", "pixel_values"]
| 36 | 1 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"E": 12.70,
"T": 9.06,
"A": 8.17,
"O": 7.51,
"I": 6.97,
"N": 6.75,
"S": 6.33,
"H": 6.09,
"R": 5.99,
"D": 4.25,
"L": 4.03,
"C": 2.78,
"U": 2.76,
"M": 2.41,
"W": 2.36,
"F": 2.23,
"G": 2.02,
"Y": 1.97,
"P": 1.93,
"B": 1.29,
"V": 0.98,
"K": 0.77,
"J": 0.15,
"X": 0.15,
"Q": 0.10,
"Z": 0.07,
}
_lowercase: Any = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
_lowercase: Optional[int] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    """simple docstring"""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero(x: tuple) -> str:
    """simple docstring"""
    return x[0]
def get_frequency_order(message: str) -> str:
    """simple docstring"""
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = ''''''.join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)
def english_freq_match_score(message: str) -> int:
    """simple docstring"""
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
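    # Quick demonstration (added for illustration; not part of the original
    # module): ordinary English text tends to score near the maximum of 12,
    # while junk text scores lower.
    print(english_freq_match_score("The quick brown fox jumps over the lazy dog"))
    print(english_freq_match_score("Zzyzx qoph vex"))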
| 227 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    '''simple docstring'''
    create_state_space_tree(sequence, [], 0)
def create_state_space_tree(
    sequence: list[Any], current_subsequence: list[Any], index: int
) -> None:
    '''simple docstring'''
    if index == len(sequence):
        print(current_subsequence)
        return
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
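# Each element is either skipped (first recursive call) or kept (second call),
# so a sequence of length n prints all 2**n subsequences, including the empty
# list. (Comment added for illustration.)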
if __name__ == "__main__":
lowerCAmelCase_ = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq) | 308 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
a :Any = logging.get_logger(__name__)
a :Optional[Any] = {
"Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Union[str, Any] = """marian"""
_SCREAMING_SNAKE_CASE :Tuple = ["""past_key_values"""]
_SCREAMING_SNAKE_CASE :Optional[int] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self, vocab_size=58_101, decoder_vocab_size=None, max_position_embeddings=1_024, encoder_layers=12, encoder_ffn_dim=4_096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4_096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1_024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=58_100, scale_embedding=False, pad_token_id=58_100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True, **kwargs, ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
'''simple docstring'''
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
                ] )
            if self.use_past:
                common_inputs["""decoder_input_ids"""] = {0: """batch"""}
                common_inputs["""decoder_attention_mask"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
            else:
                common_inputs["""decoder_input_ids"""] = {0: """batch""", 1: """decoder_sequence"""}
                common_inputs["""decoder_attention_mask"""] = {0: """batch""", 1: """decoder_sequence"""}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="""inputs""")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"""past_key_values.{i}.key"""] = {0: """batch""", 2: """past_sequence + sequence"""}
                    common_inputs[f"""past_key_values.{i}.value"""] = {0: """batch""", 2: """past_sequence + sequence"""}
        else:
            common_inputs = OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
                    ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
                ] )
        return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"""present.{i}.key"""] = {0: """batch""", 2: """past_sequence + sequence"""}
                    common_outputs[f"""present.{i}.value"""] = {0: """batch""", 2: """past_sequence + sequence"""}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        """simple docstring"""
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework)
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["""input_ids"""].shape
            decoder_seq_length = common_inputs["""decoder_input_ids"""].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["""decoder_attention_mask"""] = torch.cat(
                [common_inputs["""decoder_attention_mask"""], torch.ones(batch, decoder_past_length)], dim=1, )
            common_inputs["""past_key_values"""] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        """simple docstring"""
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""")
            else:
                import torch
            batch, seqlen = common_inputs["""input_ids"""].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["""attention_mask"""].dtype
            common_inputs["""attention_mask"""] = torch.cat(
                [common_inputs["""attention_mask"""], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
            common_inputs["""past_key_values"""] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        """simple docstring"""
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [""" """.join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        return common_inputs
    def _flatten_past_key_values_( self, flattened_output, name, idx, t ):
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t)
@property
    def atol_for_validation( self ) -> float:
"""simple docstring"""
return 1E-4
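# Hypothetical export sketch (added for illustration; the checkpoint name and
# output path are assumptions, not taken from this module):
#
#   from pathlib import Path
#   from transformers import AutoTokenizer, MarianMTModel
#   from transformers.onnx import export
#
#   tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   onnx_config = MarianOnnxConfig(model.config, task="seq2seq-lm")
#   export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("marian.onnx"))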
| 56 |
"""simple docstring"""
def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
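# With the adjacency list above this prints 5: the longest chain is
# 0 -> 2 -> 5 -> 6 -> 7, which visits five vertices. (Comment added.)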
| 56 | 1 |
"""simple docstring"""
import torch
def main() -> None:
    '''simple docstring'''
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F"""Successfully ran on {num_gpus} GPUs""")
if __name__ == "__main__":
main()
| 105 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    """simple docstring"""
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def lowerCAmelCase_ ( __a , __a , __a = False ) -> Dict:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""")
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__: Union[str, Any] =in_proj_weight[: config.hidden_size, :]
lowerCamelCase__: str =in_proj_bias[: config.hidden_size]
lowerCamelCase__: str =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__: str =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__: Optional[int] =in_proj_weight[-config.hidden_size :, :]
lowerCamelCase__: List[Any] =in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    """simple docstring"""
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.query.weight"""] = val[:dim, :]
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.key.weight"""] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.query.bias"""] = val[:dim]
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.key.bias"""] = val[dim : dim * 2]
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.value.bias"""] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_img() -> torch.Tensor:
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """simple docstring"""
    config = get_yolos_config(yolos_name)
    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
        expected_slice_boxes = torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
        expected_slice_boxes = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
        expected_slice_boxes = torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
        expected_slice_boxes = torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
        expected_slice_boxes = torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)
Path(__a ).mkdir(exist_ok=__a )
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__a )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__a )
if push_to_hub:
lowerCamelCase__: Any ={
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
lowerCamelCase__: Optional[int] =model_mapping[yolos_name]
image_processor.push_to_hub(__a , organization="hustvl" )
model.push_to_hub(__a , organization="hustvl" )
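# Example invocation (script name and paths are illustrative, not real files):
#   python convert_yolos_to_pytorch.py --yolos_name yolos_ti \
#       --checkpoint_path /path/to/yolos_ti.pth --pytorch_dump_folder_path ./yolos-tiny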
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 10 | 0 |
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # up
    [0, -1],  # left
    [1, 0],  # down
    [0, 1],  # right
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """Search for a path from init to goal on a grid, avoiding obstacles."""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
| 79 |
"""simple docstring"""
import math


def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the sum of
    the squares of the first n natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
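# Hand-checked sanity test: for n = 10 the sum of squares is 385 and the
# square of the sum is 3025, so the difference is 2640.
assert solution(10) == 2640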
if __name__ == "__main__":
print(F'''{solution() = }''')
| 79 | 1 |
"""simple docstring"""
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    # Attribute names follow the PipelineTool convention; the originals were
    # lost in this copy, so treat them as a best-effort reconstruction.
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
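# Minimal usage sketch (assumes the checkpoint above can be downloaded; the
# variable names are illustrative only):
#   summarizer = TextSummarizationTool()
#   summary = summarizer("A long English passage to condense ...")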
| 217 |
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """Project Euler 16: return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
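# Hand-checked example: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26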
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 113 | 0 |
'''simple docstring'''
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class PolybiusCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")
        # The 2D index expressions below were lost in this copy and follow the
        # standard row-major layout: row 0 holds first coordinates, row 1 second.
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter
        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter
        return decoded_message
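# Hand-checked round trip ("j" is folded into "i" by design of the 5x5 square):
# encode("test") yields "qtuo", and decoding it returns "test".
assert PolybiusCipher().decode(PolybiusCipher().encode("test")) == "test"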
| 72 |
'''simple docstring'''
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    # Method names reconstructed to descriptive test_* names; the originals
    # were lost in this copy.
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
| 72 | 1 |
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string to Ascii85 bytes."""
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85 bytes back to a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")
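# Round-trip sanity check (Ascii85 is lossless for UTF-8 text):
assert base85_decode(base85_encode("some text")) == "some text"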
if __name__ == "__main__":
import doctest
doctest.testmod()
| 90 |
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    """Surface area of a cube."""
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Surface area of a cuboid."""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    """Surface area of a sphere."""
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """Total surface area of a hemisphere (curved surface plus circular base)."""
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    """Total surface area of a right circular cone."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Total surface area of a conical frustum."""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values"
        )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    """Total surface area of a right circular cylinder."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Surface area of a (ring) torus."""
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori"
        )
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    """Area of a rectangle."""
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    """Area of a square."""
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    """Area of a triangle from its base and height."""
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """Area of a triangle from its three sides, using Heron's formula."""
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    """Area of a parallelogram."""
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base1: float, base2: float, height: float) -> float:
    """Area of a trapezium."""
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base1 + base2) * height


def area_circle(radius: float) -> float:
    """Area of a circle."""
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Area of an ellipse."""
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Area of a rhombus from its diagonals."""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    """Area of a regular polygon with the given number of sides and side length."""
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))
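# Hand-checked example of Heron's formula: a 3-4-5 right triangle has
# semi-perimeter 6 and area sqrt(6 * 3 * 2 * 1) = 6.
assert area_triangle_three_sides(3, 4, 5) == 6.0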
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f'''Rectangle: {area_rectangle(10, 20) = }''')
print(f'''Square: {area_square(10) = }''')
print(f'''Triangle: {area_triangle(10, 10) = }''')
print(f'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(f'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(f'''Rhombus: {area_rhombus(10, 20) = }''')
print(f'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(f'''Circle: {area_circle(20) = }''')
print(f'''Ellipse: {area_ellipse(10, 20) = }''')
print("\nSurface Areas of various geometric shapes: \n")
print(f'''Cube: {surface_area_cube(20) = }''')
print(f'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(f'''Sphere: {surface_area_sphere(20) = }''')
print(f'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(f'''Cone: {surface_area_cone(10, 20) = }''')
print(f'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(f'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(f'''Torus: {surface_area_torus(20, 10) = }''')
print(f'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(f'''Square: {area_reg_polygon(4, 10) = }''')
    print(f'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
| 90 | 1 |
"""simple docstring"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count reversible numbers of the given digit length, filling `digits`
    from the outside in. The digit index expressions were lost in this copy
    and follow the standard outside-in construction."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result
    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """Count reversible numbers with at most max_power digits."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
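# For reference, Project Euler 145 counts reversible numbers below one billion;
# the widely quoted answer for solution(9) is 608720 (not verified here).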
if __name__ == "__main__":
print(F'{solution() = }')
| 351 |
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 339 | 0 |
'''simple docstring'''
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 258 |
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Speed of sound in a fluid, v = sqrt(bulk_modulus / density), with
    density in kg/m^3 and bulk modulus in Pa."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
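# Hand-checked example with approximate constants for water near 20 degrees C
# (bulk modulus ~2.15e9 Pa, density ~998 kg/m^3), giving roughly 1468 m/s:
assert round(speed_of_sound_in_a_fluid(998, 2.15e9)) == 1468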
if __name__ == "__main__":
import doctest
doctest.testmod()
| 258 | 1 |
'''simple docstring'''
import inspect
import os

import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed

import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
    AccelerateTestCase,
    TempDirTestCase,
    execute_subprocess_async,
    require_cuda,
    require_fsdp,
    require_multi_gpu,
    slow,
)
from accelerate.utils.constants import (
    FSDP_AUTO_WRAP_POLICY,
    FSDP_BACKWARD_PREFETCH,
    FSDP_SHARDING_STRATEGY,
    FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment


set_seed(42)

BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]


@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()
        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true", MASTER_ADDR="localhost", MASTER_PORT="10999", RANK="0", LOCAL_RANK="0", WORLD_SIZE="1"
        )

    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            # The env var keys below follow accelerate's FSDP plugin convention;
            # the originals were lost in this copy, so treat them as reconstructed.
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))

    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))

    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)

    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)

    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)

    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])

    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
| 25 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 25 | 1 |
from copy import deepcopy


class FenwickTree:
    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
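# Hand-checked usage sketch over the list [1, 2, 3, 4]:
f = FenwickTree(arr=[1, 2, 3, 4])
assert f.prefix(4) == 10  # 1 + 2 + 3 + 4 (prefix is exclusive of the right index)
f.add(1, 10)  # underlying array becomes [1, 12, 3, 4]
assert f.query(0, 2) == 13 and f.get(1) == 12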
if __name__ == "__main__":
import doctest
doctest.testmod()
| 201 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union

import numpy as np
import torch

from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor


if is_onnx_available():
    from ..onnx_utils import OnnxRuntimeModel

from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256


class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: T5FilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to the network's output range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs back to the features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, np.ndarray], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones
            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )
            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )
            # set step values
            self.scheduler.set_timesteps(num_inference_steps)
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )
                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample
            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()
            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)
            logger.info("Generated segment", i)
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )
        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=output)
| 201 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 351 |
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf

    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments


@require_tf
class TFBenchmarkTest(unittest.TestCase):
    # Method names reconstructed to descriptive test_* names; the originals
    # were lost in this copy.
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 321 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : List[Any], _lowerCAmelCase : Any ):
"""simple docstring"""
_a = MobileBertConfig.from_json_file(_lowerCAmelCase )
print(f'Building PyTorch model from configuration: {config}' )
_a = MobileBertForPreTraining(_lowerCAmelCase )
# Load weights from tf checkpoint
_a = load_tf_weights_in_mobilebert(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict(), _lowerCAmelCase )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path) | 320 |
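# Hedged CLI sketch for the converter above; the script filename and all
# paths are illustrative placeholders:
#
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert/pytorch_model.bin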
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=9 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=8 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.002 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]:
_a = parent
_a = batch_size
_a = encoder_seq_length
_a = decoder_seq_length
# For common tests
_a = self.decoder_seq_length
_a = is_training
_a = use_attention_mask
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = d_ff
_a = relative_attention_num_buckets
_a = dropout_rate
_a = initializer_factor
_a = eos_token_id
_a = pad_token_id
_a = decoder_start_token_id
_a = None
_a = decoder_layers
def _UpperCAmelCase ( self ) -> Dict:
return TaConfig.from_pretrained('''google/umt5-base''' )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]:
if attention_mask is None:
_a = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_a = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCAmelCase )
if decoder_head_mask is None:
_a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase )
if cross_attn_head_mask is None:
_a = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _UpperCAmelCase ( self ) -> Tuple:
_a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
_a = input_ids.clamp(self.pad_token_id + 1 )
_a = decoder_input_ids.clamp(self.pad_token_id + 1 )
_a = self.get_config()
_a = config.num_attention_heads
_a = self.prepare_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, input_dict
def _UpperCAmelCase ( self ) -> int:
_a , _a = self.prepare_config_and_inputs()
return config, inputs_dict
def _UpperCAmelCase ( self ) -> Tuple:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _UpperCAmelCase ( self ) -> List[str]:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Dict:
_a = UMTaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(
input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , )
_a = model(input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase )
_a = result.last_hidden_state
_a = result.past_key_values
_a = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__UpperCAmelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[Any]:
_a = UMTaModel(config=__UpperCAmelCase ).get_decoder().to(__UpperCAmelCase ).eval()
# first forward pass
_a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
_a = model(__UpperCAmelCase )
_a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 )
_a , _a = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_a = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
_a = torch.cat([input_ids, next_tokens] , dim=-1 )
_a = model(__UpperCAmelCase )['''last_hidden_state''']
_a = model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )['''last_hidden_state''']
# select random slice
_a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a = output_from_no_past[:, -1, random_slice_idx].detach()
_a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , ) -> Union[str, Any]:
_a = UMTaModel(config=__UpperCAmelCase ).to(__UpperCAmelCase ).half().eval()
_a = model(**__UpperCAmelCase )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(__UpperCAmelCase ).any().item() )
@require_torch
class __lowerCamelCase ( a__ , a__ , a__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
A_ : Optional[Any] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
A_ : int = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
A_ : str = True
A_ : List[str] = False
A_ : List[Any] = False
A_ : str = True
A_ : List[str] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
A_ : Optional[Any] = [0.8, 0.9]
def _UpperCAmelCase ( self ) -> Tuple:
_a = UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def _UpperCAmelCase ( self ) -> int:
_a = self.model_tester.prepare_config_and_inputs()
_a = UMTaModel(config_and_inputs[0] ).to(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__UpperCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'{tmpdirname}/t5_test.onnx' , export_params=__UpperCAmelCase , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
_a = self.model_tester.prepare_config_and_inputs()
_a = config_and_inputs[0]
_a = UMTaForConditionalGeneration(__UpperCAmelCase ).eval()
model.to(__UpperCAmelCase )
_a = {
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=__UpperCAmelCase ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ),
}
for attn_name, (name, mask) in zip(__UpperCAmelCase , head_masking.items() ):
_a = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_a = torch.ones(
config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase )
_a = model.generate(
config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=__UpperCAmelCase , return_dict_in_generate=__UpperCAmelCase , **__UpperCAmelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def _UpperCAmelCase ( self ) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=__UpperCAmelCase ).to(__UpperCAmelCase )
_a = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=__UpperCAmelCase , legacy=__UpperCAmelCase )
_a = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
_a = tokenizer(__UpperCAmelCase , return_tensors='''pt''' , padding=__UpperCAmelCase ).input_ids
# fmt: off
_a = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(__UpperCAmelCase , __UpperCAmelCase )
_a = model.generate(input_ids.to(__UpperCAmelCase ) )
_a = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
_a = tokenizer.batch_decode(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) | 320 | 1 |
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas=None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device=None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(self, model_output, timestep, sample, return_dict=True):
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample, *args, **kwargs):
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps | 188 |
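# Minimal usage sketch for the scheduler above (illustrative only: it assumes
# the module lives inside the diffusers package so the relative imports
# resolve, and `torch.zeros_like` stands in for a trained denoiser call):
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       model_output = torch.zeros_like(sample)  # stand-in for a denoiser
#       sample = scheduler.step(model_output, t, sample).prev_sample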
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('''T''')


class LRUCache(Generic[T]):
    '''Page replacement cache that evicts the least recently used key.'''

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('''n should be an integer greater than 0.''')
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                # evict the least recently used key (tail of the deque)
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache = LRUCache(4)
    lru_cache.refer('''A''')
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer('''A''')
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]" | 188 | 1 |
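# For contrast, a standard-library variant (illustrative, not part of the
# original snippet): an OrderedDict gives O(1) recency updates instead of
# the O(n) deque.remove above. Most-recent entries sit at the front, to
# mirror the appendleft semantics of the class above.
from collections import OrderedDict


class LRUCacheOD:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store: OrderedDict = OrderedDict()

    def refer(self, x) -> None:
        if x in self.store:
            self.store.move_to_end(x, last=False)  # mark as most recently used
        else:
            if len(self.store) == self.capacity:
                self.store.popitem(last=True)  # evict least recently used
            self.store[x] = None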
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 85 |
'''simple docstring'''
def binomial_coefficient(n: int, r: int) -> int:
    # Pascal's rule computed bottom-up in O(n * r) time and O(r) space.
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
| 85 | 1 |
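# Sanity check against the standard library (illustrative, not part of the
# original snippet); math.comb is available on Python 3.8+:
import math

assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252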
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 256 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,) | 256 | 1 |
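# Hedged usage sketch for the tokenizer above (assumes the `transformers`
# and `sentencepiece` packages are installed; the checkpoint name comes from
# the pretrained map above, and the call downloads the vocab on first use):
#
#   from transformers import BertGenerationTokenizer
#
#   tok = BertGenerationTokenizer.from_pretrained(
#       "google/bert_for_seq_generation_L-24_bbc_encoder"
#   )
#   ids = tok("Hello world", return_tensors="pt").input_ids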
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType('''DataClass''', Any)
DataClassType = NewType('''DataClassType''', Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            F"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Iterable[DataClassType]
def __init__( self : Dict , __a : Union[DataClassType, Iterable[DataClassType]] , **__a : Dict ) -> Optional[Any]:
"""simple docstring"""
if "formatter_class" not in kwargs:
__lowercase : Optional[Any] = ArgumentDefaultsHelpFormatter
super().__init__(**__a )
if dataclasses.is_dataclass(__a ):
__lowercase : str = [dataclass_types]
__lowercase : str = list(__a )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(__a )
@staticmethod
def lowerCAmelCase ( __a : ArgumentParser , __a : dataclasses.Field ) -> str:
"""simple docstring"""
__lowercase : int = F"--{field.name}"
__lowercase : int = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , __a ):
raise RuntimeError(
"""Unresolved type detected, which should have been done with the help of """
"""`typing.get_type_hints` method by default""" )
__lowercase : str = kwargs.pop("""aliases""" , [] )
if isinstance(__a , __a ):
__lowercase : Tuple = [aliases]
__lowercase : Optional[int] = getattr(field.type , """__origin__""" , field.type )
if origin_type is Union or (hasattr(__a , """UnionType""" ) and isinstance(__a , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(__a ) not in field.type.__args__
):
raise ValueError(
"""Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"""
""" the argument parser only supports one type per argument."""
F" Problem encountered in field '{field.name}'." )
if type(__a ) not in field.type.__args__:
# filter `str` in Union
__lowercase : Optional[int] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
__lowercase : Dict = getattr(field.type , """__origin__""" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
__lowercase : Dict = (
field.type.__args__[0] if isinstance(__a , field.type.__args__[1] ) else field.type.__args__[1]
)
__lowercase : int = getattr(field.type , """__origin__""" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
__lowercase : List[str] = {}
if origin_type is Literal or (isinstance(field.type , __a ) and issubclass(field.type , __a )):
if origin_type is Literal:
__lowercase : Union[str, Any] = field.type.__args__
else:
__lowercase : int = [x.value for x in field.type]
__lowercase : List[Any] = make_choice_type_function(kwargs["""choices"""] )
if field.default is not dataclasses.MISSING:
__lowercase : List[Any] = field.default
else:
__lowercase : int = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
__lowercase : List[Any] = copy(__a )
# Hack because type=bool in argparse does not behave as we want.
__lowercase : Tuple = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
__lowercase : Union[str, Any] = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
__lowercase : int = default
# This tells argparse we accept 0 or 1 value after --field_name
__lowercase : Any = """?"""
# This is the value that will get picked if we do --field_name (without value)
__lowercase : str = True
elif isclass(__a ) and issubclass(__a , __a ):
__lowercase : Optional[int] = field.type.__args__[0]
__lowercase : Union[str, Any] = """+"""
if field.default_factory is not dataclasses.MISSING:
__lowercase : Union[str, Any] = field.default_factory()
elif field.default is dataclasses.MISSING:
__lowercase : List[str] = True
else:
__lowercase : Optional[int] = field.type
if field.default is not dataclasses.MISSING:
__lowercase : Dict = field.default
elif field.default_factory is not dataclasses.MISSING:
__lowercase : Optional[int] = field.default_factory()
else:
__lowercase : int = True
parser.add_argument(__a , *__a , **__a )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
__lowercase : int = False
parser.add_argument(F"--no_{field.name}" , action="""store_false""" , dest=field.name , **__a )
def lowerCAmelCase ( self : Tuple , __a : DataClassType ) -> Optional[Any]:
"""simple docstring"""
if hasattr(__a , """_argument_group_name""" ):
__lowercase : Optional[Any] = self.add_argument_group(dtype._argument_group_name )
else:
__lowercase : Union[str, Any] = self
try:
__lowercase : Dict[str, type] = get_type_hints(__a )
except NameError:
raise RuntimeError(
F"Type resolution failed for {dtype}. Try declaring the class in global scope or "
"""removing line of `from __future__ import annotations` which opts in Postponed """
"""Evaluation of Annotations (PEP 563)""" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__a ):
__lowercase : List[str] = """.""".join(map(__a , sys.version_info[:3] ) )
raise RuntimeError(
F"Type resolution failed for {dtype} on Python {python_version}. Try removing "
"""line of `from __future__ import annotations` which opts in union types as """
"""`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To """
"""support Python versions that lower than 3.10, you need to use """
"""`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of """
"""`X | None`.""" ) from ex
raise
for field in dataclasses.fields(__a ):
if not field.init:
continue
__lowercase : Optional[Any] = type_hints[field.name]
self._parse_dataclass_field(__a , __a )
def lowerCAmelCase ( self : List[Any] , __a : str=None , __a : Optional[int]=False , __a : Union[str, Any]=True , __a : Tuple=None , __a : Tuple=None , ) -> Tuple[DataClass, ...]:
"""simple docstring"""
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
__lowercase : Optional[int] = []
if args_filename:
args_files.append(Path(__a ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(""".args""" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
__lowercase : Any = ArgumentParser()
args_file_parser.add_argument(__a , type=__a , action="""append""" )
# Use only remaining args for further parsing (remove the args_file_flag)
__lowercase , __lowercase : Optional[int] = args_file_parser.parse_known_args(args=__a )
__lowercase : Optional[Any] = vars(__a ).get(args_file_flag.lstrip("""-""" ) , __a )
if cmd_args_file_paths:
args_files.extend([Path(__a ) for p in cmd_args_file_paths] )
__lowercase : Optional[Any] = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
__lowercase : Union[str, Any] = file_args + args if args is not None else file_args + sys.argv[1:]
__lowercase , __lowercase : List[str] = self.parse_known_args(args=__a )
__lowercase : Any = []
for dtype in self.dataclass_types:
__lowercase : Dict = {f.name for f in dataclasses.fields(__a ) if f.init}
__lowercase : Optional[int] = {k: v for k, v in vars(__a ).items() if k in keys}
for k in keys:
delattr(__a , __a )
__lowercase : str = dtype(**__a )
outputs.append(__a )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(__a )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F"Some specified arguments are not used by the HfArgumentParser: {remaining_args}" )
return (*outputs,)
def lowerCAmelCase ( self : str , __a : Dict[str, Any] , __a : bool = False ) -> Tuple[DataClass, ...]:
"""simple docstring"""
__lowercase : Optional[int] = set(args.keys() )
__lowercase : Dict = []
for dtype in self.dataclass_types:
__lowercase : int = {f.name for f in dataclasses.fields(__a ) if f.init}
__lowercase : str = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
__lowercase : Tuple = dtype(**__a )
outputs.append(__a )
if not allow_extra_keys and unused_keys:
raise ValueError(F"Some keys are not used by the HfArgumentParser: {sorted(__a )}" )
return tuple(__a )
def lowerCAmelCase ( self : Tuple , __a : str , __a : bool = False ) -> Tuple[DataClass, ...]:
"""simple docstring"""
with open(Path(__a ) , encoding="""utf-8""" ) as open_json_file:
__lowercase : Optional[int] = json.loads(open_json_file.read() )
__lowercase : Optional[int] = self.parse_dict(__a , allow_extra_keys=__a )
return tuple(__a )
def lowerCAmelCase ( self : int , __a : str , __a : bool = False ) -> Tuple[DataClass, ...]:
"""simple docstring"""
__lowercase : Optional[Any] = self.parse_dict(yaml.safe_load(Path(__a ).read_text() ) , allow_extra_keys=__a )
return tuple(__a ) | 233 |
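# Hedged usage sketch for the parser above (assumes it is transformers'
# HfArgumentParser; the dataclass, field names, and flag values are made up
# purely for illustration):
from dataclasses import dataclass, field


@dataclass
class DemoArguments:
    model_name: str = field(default="bert-base-uncased", metadata={"help": "Model checkpoint."})
    learning_rate: float = field(default=5e-5, metadata={"help": "Initial learning rate."})


# parser = HfArgumentParser(DemoArguments)
# (demo_args,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "3e-5"])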
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original AST weights into the 🤗 model structure."""
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 233 | 1 |
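# Hedged CLI sketch for the conversion script above; the script filename and
# the dump path are illustrative placeholders, while the model name matches
# the default defined above:
#
#   python convert_audio_spectrogram_transformer_original_to_pytorch.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-converted \
#       --push_to_hub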
"""simple docstring"""
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCamelCase :
@property
def _lowercase (self : Optional[int]) -> List[str]:
return self.get_dummy_input()
@property
def _lowercase (self : Any) -> Union[str, Any]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")
def _lowercase (self : Optional[int] , _A : Optional[int]=True , _A : Optional[int]=False , _A : Tuple=False , _A : str=False , ) -> List[str]:
__snake_case : int = 4
__snake_case : int = 32
__snake_case : List[Any] = (32, 32)
__snake_case : str = torch.manual_seed(0)
__snake_case : str = torch.device(_A)
__snake_case : Tuple = (batch_size, num_channels) + sizes
__snake_case : Optional[Any] = randn_tensor(_A , generator=_A , device=_A)
__snake_case : List[str] = {'hidden_states': hidden_states}
if include_temb:
__snake_case : Any = 1_28
__snake_case : Optional[Any] = randn_tensor((batch_size, temb_channels) , generator=_A , device=_A)
if include_res_hidden_states_tuple:
__snake_case : List[str] = torch.manual_seed(1)
__snake_case : Optional[int] = (randn_tensor(_A , generator=_A , device=_A),)
if include_encoder_hidden_states:
__snake_case : Optional[Any] = floats_tensor((batch_size, 32, 32)).to(_A)
if include_skip_sample:
__snake_case : Optional[Any] = randn_tensor(((batch_size, 3) + sizes) , generator=_A , device=_A)
return dummy_input
def _lowercase (self : int) -> Dict:
__snake_case : Union[str, Any] = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 1_28,
}
if self.block_type == "up":
__snake_case : int = 32
if self.block_type == "mid":
init_dict.pop('out_channels')
__snake_case : str = self.dummy_input
return init_dict, inputs_dict
def _lowercase (self : List[str] , _A : Optional[int]) -> str:
__snake_case , __snake_case : int = self.prepare_init_args_and_inputs_for_common()
__snake_case : Any = self.block_class(**_A)
unet_block.to(_A)
unet_block.eval()
with torch.no_grad():
__snake_case : Any = unet_block(**_A)
if isinstance(_A , _A):
__snake_case : List[Any] = output[0]
self.assertEqual(output.shape , self.output_shape)
__snake_case : Optional[int] = output[0, -1, -3:, -3:]
__snake_case : Union[str, Any] = torch.tensor(_A).to(_A)
assert torch_all_close(output_slice.flatten() , _A , atol=5E-3)
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps')
def _lowercase (self : str) -> List[Any]:
__snake_case , __snake_case : int = self.prepare_init_args_and_inputs_for_common()
__snake_case : Tuple = self.block_class(**_A)
model.to(_A)
model.train()
__snake_case : Any = model(**_A)
if isinstance(_A , _A):
__snake_case : Any = output[0]
__snake_case : Tuple = torch.device(_A)
__snake_case : List[str] = randn_tensor(output.shape , device=_A)
__snake_case : Union[str, Any] = torch.nn.functional.mse_loss(_A , _A)
loss.backward()
| 95 | """simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCamelCase ( lowercase ):
@require_torch
def _lowercase (self : Union[str, Any]) -> Optional[Any]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
__snake_case : Any = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
__snake_case : Tuple = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
__snake_case : int = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
__snake_case : int = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_A)
BertModel.from_pretrained(_A)
BertTokenizer.from_pretrained(_A)
pipeline(task='fill-mask' , model=_A)
# baseline - just load from_pretrained with normal network
__snake_case : Union[str, Any] = [sys.executable, '-c', '\n'.join([load, run, mock])]
# should succeed
__snake_case : Union[str, Any] = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
__snake_case : str = '1'
__snake_case : Union[str, Any] = subprocess.run(_A , env=_A , check=_A , capture_output=_A)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn('success' , result.stdout.decode())
@require_torch
def _lowercase (self : Union[str, Any]) -> Union[str, Any]:
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
__snake_case : Optional[Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
__snake_case : Optional[Any] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
__snake_case : Union[str, Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
__snake_case : str = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_A)
BertModel.from_pretrained(_A)
BertTokenizer.from_pretrained(_A)
pipeline(task='fill-mask' , model=_A)
# baseline - just load from_pretrained with normal network
__snake_case : Any = [sys.executable, '-c', '\n'.join([load, run, mock])]
# should succeed
__snake_case : int = self.get_env()
__snake_case : Tuple = subprocess.run(_A , env=_A , check=_A , capture_output=_A)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn('success' , result.stdout.decode())
@require_torch
def _lowercase (self : int) -> Any:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
__snake_case : int = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
__snake_case : int = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
__snake_case : Optional[int] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
__snake_case : Optional[Any] = [sys.executable, '-c', '\n'.join([load, run])]
# should succeed
__snake_case : Optional[int] = self.get_env()
__snake_case : Dict = subprocess.run(_A , env=_A , check=_A , capture_output=_A)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn('success' , result.stdout.decode())
# next emulate no network
__snake_case : Optional[Any] = [sys.executable, '-c', '\n'.join([load, mock, run])]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
__snake_case : Union[str, Any] = '1'
__snake_case : str = subprocess.run(_A , env=_A , check=_A , capture_output=_A)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn('success' , result.stdout.decode())
@require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode',
            result.stderr.decode().replace('\n', ''),
        )
@require_torch
    def test_offline_mode_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
| 95 | 1 |
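# Added illustration: the tests above all share one pattern -- build a Python one-liner,
# run it in a child process with socket.socket monkey-patched, and assert on the exit
# code. A minimal, dependency-free sketch of that pattern (the child script here is a
# stand-in for the transformers snippets used above):
import subprocess
import sys

child_script = "\n".join(
    [
        "import socket",
        "def offline_socket(*args, **kwargs): raise socket.error('Faking flaky internet')",
        "socket.socket = offline_socket",
        "print('success')  # anything after the patch runs without network access",
    ]
)
result = subprocess.run([sys.executable, "-c", child_script], check=False, capture_output=True)
assert result.returncode == 0, result.stderr
assert "success" in result.stdout.decode()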
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_UpperCamelCase = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 275 |
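# Added illustration: _LazyModule above defers the heavy imports until an attribute is
# first accessed. A minimal sketch of the underlying mechanism (module-level
# __getattr__, PEP 562); this is an illustration, not the actual transformers class:
import importlib

_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

def __getattr__(name):
    # find the submodule that defines `name`, import it on demand, cache the result
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module("." + module_name, __name__)
            value = getattr(module, name)
            globals()[name] = value  # cache so __getattr__ is not hit again
            return value
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")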
def binary_or(a: int, b: int) -> str:
    """
    Take in 2 integers and return a binary string of their bitwise OR.

    >>> binary_or(25, 32)
    '0b111001'
    >>> binary_or(0, 1)
    '0b1'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 275 | 1 |
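# Added sanity check for binary_or above: the result should always agree with
# Python's built-in | operator (a quick brute-force comparison over small inputs;
# this can equally live under the __main__ guard):
for x in range(64):
    for y in range(64):
        assert binary_or(x, y) == bin(x | y), (x, y)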
# Functions to print a diamond (pyramid) of stars


def floyd(n: int) -> None:
    """Print the upper half of the diamond: n rows of right-aligned stars."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n: int) -> None:
    """Print the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n: int) -> None:
    """Print the full diamond, or a friendly message for non-positive n."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r'| /\ | |- | |- |--| |\ /| |-')
    print(r'|/ \| |- |_ |_ |__| | \/ | |_')
    K = 1
    while K:
        user_number = int(input('enter the number and , and see the magic : '))
        print()
        pretty_print(user_number)
        K = int(input('press 0 to exit... and 1 to continue...'))
    print('Good Bye...')
| 359 |
from __future__ import annotations


def all_unique(values: list[int]) -> bool:
    """
    Return True if every element of the list is distinct.

    >>> all_unique([1, 2, 3])
    True
    >>> all_unique([1, 2, 2])
    False
    """
    return len(set(values)) == len(values)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 136 | 0 |
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`, recording nodes whose
    subtree has even size (cutting the edge above them leaves even components)."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
| 184 |
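# Added note: removing the edge between u and its parent disconnects the subtree
# rooted at u; both halves are even exactly when that subtree's size is even. A
# self-contained variant of the same idea without module-level globals (a sketch,
# not part of the original file):
from collections import defaultdict

def count_even_cuts(edges: list[tuple[int, int]], root: int = 1) -> int:
    adj = defaultdict(list)
    for u, v in edges:
        adj[u].append(v)
        adj[v].append(u)
    even_subtrees = 0

    def subtree_size(node: int, parent: int) -> int:
        nonlocal even_subtrees
        size = 1
        for child in adj[node]:
            if child != parent:
                size += subtree_size(child, node)
        if size % 2 == 0:
            even_subtrees += 1
        return size

    subtree_size(root, 0)
    return max(even_subtrees - 1, 0)  # the root's own "subtree" is not a cut

# The sample graph above yields 2 removable edges:
print(count_even_cuts([(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]))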
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class XLMRobertaConfig(PretrainedConfig):
    """Configuration class to store the configuration of an XLM-RoBERTa model."""

    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 184 | 1 |
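# Added note: the mapping returned by `inputs` above tells the ONNX exporter which
# axes are symbolic -- for the default task, axis 0 of every input is "batch" and
# axis 1 is "sequence", so a single exported graph accepts any batch size and
# sequence length. Plain-Python sketch of the two shapes (no transformers needed):
from collections import OrderedDict

dynamic_axis = {0: "batch", 1: "sequence"}
inputs = OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])
assert list(inputs) == ["input_ids", "attention_mask"]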
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_a + [102]
| 350 |
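# Added illustration: WordpieceTokenizer (exercised above) uses greedy
# longest-match-first subword splitting. A minimal sketch of that algorithm, assuming
# a vocab like the one built in test_wordpiece_tokenizer (illustration only, not the
# transformers implementation):
def greedy_wordpiece(word: str, vocab: set[str], unk: str = "[UNK]") -> list[str]:
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        cur = None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece  # continuation pieces carry the ## prefix
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]  # no piece matched: the whole word becomes [UNK]
        pieces.append(cur)
        start = end
    return pieces

vocab = {"un", "##want", "##ed", "runn", "##ing"}
print(greedy_wordpiece("unwanted", vocab))  # ['un', '##want', '##ed']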
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is the attribute ctypes uses to define the structure layout
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager that hides the terminal cursor and restores it on exit."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
| 266 | 0 |
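# Added usage sketch for the context manager above: keep the cursor hidden while
# drawing transient output (a tiny spinner; the finally block restores the cursor
# even on Ctrl-C):
import itertools
import sys
import time

with hide():
    for frame in itertools.islice(itertools.cycle("|/-\\"), 20):
        sys.stdout.write(f"\rworking {frame}")
        sys.stdout.flush()
        time.sleep(0.05)
sys.stdout.write("\rdone      \n")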
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError('number of qubits must be a integer.')
    if number_of_qubits <= 0:
        raise ValueError('number of qubits must be > 0.')
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError('number of qubits must be exact integer.')
    if number_of_qubits > 10:
        raise ValueError('number of qubits too large to simulate(>10).')
    qr = QuantumRegister(number_of_qubits, 'qr')
    cr = ClassicalRegister(number_of_qubits, 'cr')
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            # controlled phase rotation by pi / 2**(counter - j)
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('qasm_simulator')
    job = execute(quantum_circuit, backend, shots=1_0000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
| 195 |
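# Added check: the circuit above implements the N-point discrete Fourier transform on
# amplitudes. A quick numpy sanity check (no quantum simulator needed) that the QFT
# matrix F[j][k] = exp(2*pi*i*j*k/N)/sqrt(N) is unitary, i.e. norm-preserving:
import numpy as np

N = 2**3  # 3 qubits
j, k = np.meshgrid(np.arange(N), np.arange(N), indexing="ij")
F = np.exp(2j * np.pi * j * k / N) / np.sqrt(N)
assert np.allclose(F @ F.conj().T, np.eye(N))  # F is unitary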
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00_001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type') == "blip-2":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('model_type') == "blip-2":
            config_dict = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.')
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.')
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 195 | 1 |
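# Added usage sketch for the composed config above (assumes transformers is installed
# and the classes defined in this module are importable; the printed values follow
# the defaults set in the constructors above):
config = Blip2Config()
print(config.vision_config.hidden_size)           # 1408
print(config.qformer_config.encoder_hidden_size)  # tied to the vision hidden size
print(config.to_dict()["model_type"])             # "blip-2"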
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Depth-first traversal over unused edges, building an Eulerian walk."""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """Return (1, _) for an Euler circuit, (2, odd_node) for an Euler path, (3, _) otherwise."""
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print('graph is not Eulerian')
        print('no path')
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print('graph has a Euler path')
    if check == 1:
        print('graph has a Euler cycle')
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
| 353 |
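# Added note: an undirected graph has an Euler circuit iff every vertex has even
# degree, and an Euler path iff exactly two vertices have odd degree. Using the
# helper above on g1, whose odd-degree vertices are 1 and 5:
print(check_circuit_or_path({1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}, 10))  # (2, 5)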
from math import ceil


def solution(n: int = 1001) -> int:
    """Sum of the numbers on both diagonals of an n x n number spiral (Project Euler 28)."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('Invalid entry - please enter a number')
| 235 | 0 |
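# Added derivation and check: ring i (side length 2i+1) has corner values (2i+1)^2,
# (2i+1)^2 - 2i, (2i+1)^2 - 4i and (2i+1)^2 - 6i, which sum to
# 4*(2i+1)**2 - 12*i = 4*odd**2 - 6*even. Brute-force verification of the closed
# form above by walking the spiral corners directly:
def diagonal_sum_bruteforce(n: int) -> int:
    total, value = 1, 1
    for step in range(2, n, 2):  # ring side lengths grow by 2
        for _ in range(4):       # four corners per ring, `step` apart
            value += step
            total += value
    return total

assert diagonal_sum_bruteforce(5) == 101  # 1+3+5+7+9 + 13+17+21+25
assert all(diagonal_sum_bruteforce(n) == solution(n) for n in range(1, 102, 2))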
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def _A ( self : Dict ):
pass
def _A ( self : Optional[int] ):
UpperCamelCase :List[str] = self.get_tokenizers(do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
UpperCamelCase :Tuple = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
UpperCamelCase :Optional[int] = tokenizer.encode([special_token] , add_special_tokens=__lowercase )
self.assertEqual(len(__lowercase ) , 1 )
UpperCamelCase :Dict = tokenizer.decode(__lowercase , skip_special_tokens=__lowercase )
self.assertTrue(special_token not in decoded )
def _A ( self : List[Any] ):
UpperCamelCase :Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
UpperCamelCase , UpperCamelCase :Optional[Any] = self.get_input_output_texts(__lowercase )
UpperCamelCase :Tuple = tokenizer.tokenize(__lowercase )
UpperCamelCase :List[Any] = tokenizer.convert_tokens_to_ids(__lowercase )
UpperCamelCase :Union[str, Any] = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
UpperCamelCase :Optional[int] = tokenizer.convert_ids_to_tokens(__lowercase )
self.assertNotEqual(len(__lowercase ) , 0 )
UpperCamelCase :Optional[int] = tokenizer.decode(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual(text_a.replace(""" """ , """""" ) , __lowercase )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def _A ( self : str ):
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def _A ( self : Union[str, Any] ):
pass
| 38 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 174 | 0 |
'''simple docstring'''
import argparse
a_ : int = """docs/source/_static/js/custom.js"""
def a_ ( __snake_case : Tuple ) -> List[str]:
"""simple docstring"""
with open(__snake_case , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCamelCase_ =f.readlines()
lowerCamelCase_ =0
# First let's put the right version
while not lines[index].startswith('''const stableVersion =''' ):
index += 1
lowerCamelCase_ =F'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith('''const versionMapping = {''' ):
index += 1
# We go until the end
while not lines[index].startswith('''}''' ):
index += 1
# We add the new version at the end
lines[index - 1] += F''' "v{version}": "v{version}",\n'''
with open(__snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(__snake_case )
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
a_ : str = parser.parse_args()
update_custom_js(args.version)
| 6 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 6 | 1 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f'''Accelerated optimizer pickling failed with {e}''')
        AcceleratorState._reset_state()
| 80 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 309 | 0 |
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"Salesforce/codegen-350M-mono": 2_048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) tokenizer for CodeGen models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids,
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
| 56 |
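# Added illustration: truncate() above cuts a generated completion at the first
# matching "terminal" pattern, and also at a second top-level print/def. A standalone
# sketch of the same regex trick (stdlib only, illustrative input string):
import re

completion = "def add(a, b):\n    return a + b\n\ndef unwanted():\n    pass\n"
defs = list(re.finditer("^def", completion, re.MULTILINE))
if len(defs) > 1:
    completion = completion[: defs[1].start()]
print(completion)  # keeps only the first top-level function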
"""simple docstring"""
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56 | 1 |
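# Added usage sketch for the bit helpers above (values worked out by hand):
assert set_bit(0b1101, 1) == 0b1111      # 13 -> 15
assert clear_bit(0b10010, 1) == 0b10000  # 18 -> 16
assert flip_bit(0b101, 1) == 0b111       # 5 -> 7
assert is_bit_set(0b1010, 3) is True
assert get_bit(0b1010, 1) == 1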
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def A_ ( _lowercase = "isbn/0140328726" ):
'''simple docstring'''
snake_case_ :str = olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("""/""" ) != 1:
snake_case_ :Any = f"""{olid} is not a valid Open Library olid"""
raise ValueError(_lowercase )
return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json()
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :Tuple = {
"""title""": """Title""",
"""publish_date""": """Publish date""",
"""authors""": """Authors""",
"""number_of_pages""": """Number of pages:""",
"""first_sentence""": """First sentence""",
"""isbn_10""": """ISBN (10)""",
"""isbn_13""": """ISBN (13)""",
}
snake_case_ :Any = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
snake_case_ :Optional[int] = [
get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""]
]
snake_case_ :Optional[int] = data["""First sentence"""]["""value"""]
for key, value in data.items():
if isinstance(_lowercase, _lowercase ):
snake_case_ :Tuple = """, """.join(_lowercase )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
__a = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
__a = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print("\n".join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""")
| 66 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
lowerCamelCase = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
lowerCamelCase = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
lowerCamelCase = R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('float'),
                    'references': datasets.Value('float'),
                }
            ),
            reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 166 | 0 |
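# Added illustration of what the metric above computes: Spearman correlates ranks,
# so any monotonic relationship scores 1.0 even when it is far from linear
# (requires scipy, which the metric itself already depends on):
from scipy.stats import pearsonr, spearmanr

x = [1, 2, 3, 4, 5]
y = [v**3 for v in x]  # monotonic but non-linear
rho, _ = spearmanr(x, y)
r, _ = pearsonr(x, y)
print(rho)  # 1.0   (rank correlation sees a perfect monotonic relationship)
print(r)    # < 1.0 (linear correlation is penalized by the curvature)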
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    """Parse the usual yes/no spellings into a bool (argparse's type=bool is a trap)."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Map each choice's string representation back to the actual value."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """An ArgumentParser whose arguments are generated from dataclass fields."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
def _snake_case ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , )-> Tuple[DataClass, ...]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
lowerCamelCase_ =[]
if args_filename:
args_files.append(Path(_SCREAMING_SNAKE_CASE ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(""".args""" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
lowerCamelCase_ =ArgumentParser()
args_file_parser.add_argument(_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , action="""append""" )
# Use only remaining args for further parsing (remove the args_file_flag)
lowerCamelCase_ , lowerCamelCase_ =args_file_parser.parse_known_args(args=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =vars(_SCREAMING_SNAKE_CASE ).get(args_file_flag.lstrip("""-""" ) , _SCREAMING_SNAKE_CASE )
if cmd_args_file_paths:
args_files.extend([Path(p ) for p in cmd_args_file_paths] )
lowerCamelCase_ =[]
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
lowerCamelCase_ =file_args + args if args is not None else file_args + sys.argv[1:]
lowerCamelCase_ , lowerCamelCase_ =self.parse_known_args(args=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =[]
for dtype in self.dataclass_types:
lowerCamelCase_ ={f.name for f in dataclasses.fields(_SCREAMING_SNAKE_CASE ) if f.init}
lowerCamelCase_ ={k: v for k, v in vars(_SCREAMING_SNAKE_CASE ).items() if k in keys}
for k in keys:
delattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =dtype(**_SCREAMING_SNAKE_CASE )
outputs.append(_SCREAMING_SNAKE_CASE )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(_SCREAMING_SNAKE_CASE )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' )
return (*outputs,)
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False )-> Tuple[DataClass, ...]:
lowerCamelCase_ =set(args.keys() )
lowerCamelCase_ =[]
for dtype in self.dataclass_types:
lowerCamelCase_ ={f.name for f in dataclasses.fields(_SCREAMING_SNAKE_CASE ) if f.init}
lowerCamelCase_ ={k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
lowerCamelCase_ =dtype(**_SCREAMING_SNAKE_CASE )
outputs.append(_SCREAMING_SNAKE_CASE )
if not allow_extra_keys and unused_keys:
raise ValueError(f'Some keys are not used by the HfArgumentParser: {sorted(_SCREAMING_SNAKE_CASE )}' )
return tuple(_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False )-> Tuple[DataClass, ...]:
with open(Path(_SCREAMING_SNAKE_CASE ) , encoding="""utf-8""" ) as open_json_file:
lowerCamelCase_ =json.loads(open_json_file.read() )
lowerCamelCase_ =self.parse_dict(_SCREAMING_SNAKE_CASE , allow_extra_keys=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False )-> Tuple[DataClass, ...]:
lowerCamelCase_ =self.parse_dict(yaml.safe_load(Path(_SCREAMING_SNAKE_CASE ).read_text() ) , allow_extra_keys=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__A : Union[str, Any] = 'src/diffusers'
# Matches is_xxx_available()
__A : Dict = re.compile(R'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
__A : List[str] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
__A : Optional[Any] = '\n{0} = None\n'
__A : Optional[Any] = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n'
__A : Tuple = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
def __UpperCamelCase ( _A : Optional[Any] ) ->Dict:
"""simple docstring"""
lowerCamelCase_ =_re_backend.findall(_A )
if len(_A ) == 0:
return None
return "_and_".join(_A )
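# Illustrative behavior (not in the original): a line such as
# "if not (is_torch_available() and is_flax_available()):" yields "torch_and_flax",
# while a line containing no is_xxx_available() call returns None.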
def __UpperCamelCase ( ) ->Optional[int]:
"""simple docstring"""
with open(os.path.join(_A , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase_ =f.readlines()
# Get to the point we do the actual imports for type checking
lowerCamelCase_ =0
lowerCamelCase_ ={}
# Go through to the end of the file
while line_index < len(_A ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
lowerCamelCase_ =find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("""else:""" ):
line_index += 1
line_index += 1
lowerCamelCase_ =[]
# Until we unindent, add backend objects to the list
while line_index < len(_A ) and len(lines[line_index] ) > 1:
lowerCamelCase_ =lines[line_index]
lowerCamelCase_ =_re_single_line_import.search(_A )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(_A ) > 0:
lowerCamelCase_ =objects
else:
line_index += 1
return backend_specific_objects
def __UpperCamelCase ( _A : Union[str, Any] , _A : int ) ->Optional[Any]:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(_A )
elif name.islower():
return DUMMY_FUNCTION.format(_A , _A )
else:
return DUMMY_CLASS.format(_A , _A )
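# Illustrative behavior (not in the original): a CamelCase name, e.g.
# create_dummy_object("UNet2DModel", '["torch"]'), renders the DUMMY_CLASS template
# (a metaclass=DummyObject placeholder whose methods call requires_backends), while
# ALL_CAPS names render DUMMY_CONSTANT and lowercase names render DUMMY_FUNCTION.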
def __UpperCamelCase ( _A : Any=None ) ->Any:
"""simple docstring"""
if backend_specific_objects is None:
lowerCamelCase_ =read_init()
# Special correspondence from backend to the module name as used in the function requires_<modulename>
lowerCamelCase_ ={}
for backend, objects in backend_specific_objects.items():
lowerCamelCase_ ="""[""" + """, """.join(f'"{b}"' for b in backend.split("""_and_""" ) ) + """]"""
lowerCamelCase_ ="""# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(_A , _A ) for o in objects] )
lowerCamelCase_ =dummy_file
return dummy_files
def __UpperCamelCase ( _A : Dict=False ) ->Tuple:
"""simple docstring"""
lowerCamelCase_ =create_dummy_files()
# Special correspondence from backend to the shortcut used in utils/dummy_xxx_objects.py
lowerCamelCase_ ={"""torch""": """pt"""}
# Locate actual dummy modules and read their content.
lowerCamelCase_ =os.path.join(_A , """utils""" )
lowerCamelCase_ ={
backend: os.path.join(_A , f'dummy_{short_names.get(_A , _A )}_objects.py' )
for backend in dummy_files.keys()
}
lowerCamelCase_ ={}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(_A ):
with open(_A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase_ =f.read()
else:
lowerCamelCase_ =""""""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
f'Updating diffusers.utils.dummy_{short_names.get(_A , _A )}_objects.py as the main '
"""__init__ has new objects.""" )
with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"""The main __init__ has objects that are not present in """
f'diffusers.utils.dummy_{short_names.get(_A , _A )}_objects.py. Run `make fix-copies` '
"""to fix this.""" )
if __name__ == "__main__":
__A : List[str] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__A : str = parser.parse_args()
check_dummies(args.fix_and_overwrite)
from jiwer import compute_measures
import datasets
_SCREAMING_SNAKE_CASE = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_SCREAMING_SNAKE_CASE = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_SCREAMING_SNAKE_CASE = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=False ) -> int:
if concatenate_texts:
return compute_measures(UpperCamelCase__ , UpperCamelCase__ )["wer"]
else:
_A = 0
_A = 0
for prediction, reference in zip(UpperCamelCase__ , UpperCamelCase__ ):
_A = compute_measures(UpperCamelCase__ , UpperCamelCase__ )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
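# A minimal arithmetic sketch (not in the original) of the WER formula the
# docstring describes; the helper name is illustrative:
def _wer_from_counts(substitutions: int, deletions: int, insertions: int, hits: int) -> float:
    # WER = (S + D + I) / N, where N = S + D + C and C ("hits") counts correct words
    return (substitutions + deletions + insertions) / (substitutions + deletions + hits)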
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_A = logging.get_logger(__name__)
class A ( __UpperCAmelCase ):
def __init__( self, *UpperCamelCase__, **UpperCamelCase__ ):
"""simple docstring"""
warnings.warn(
'''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use BeitImageProcessor instead.''', UpperCamelCase__, )
super().__init__(*UpperCamelCase__, **UpperCamelCase__ )
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
__UpperCAmelCase = logging.get_logger(__name__)
# General docstring
__UpperCAmelCase = 'ResNetConfig'
# Base docstring
__UpperCAmelCase = 'microsoft/resnet-50'
__UpperCAmelCase = [1, 20_48, 7, 7]
# Image classification docstring
__UpperCAmelCase = 'microsoft/resnet-50'
__UpperCAmelCase = 'tiger cat'
__UpperCAmelCase = [
'microsoft/resnet-50',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class __a ( nn.Module ):
def __init__( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int = 3 , UpperCAmelCase : int = 1 , UpperCAmelCase : str = "relu" ):
super().__init__()
lowerCAmelCase_ : List[Any] = nn.Convad(
UpperCAmelCase , UpperCAmelCase , kernel_size=UpperCAmelCase , stride=UpperCAmelCase , padding=kernel_size // 2 , bias=UpperCAmelCase )
lowerCAmelCase_ : str = nn.BatchNormad(UpperCAmelCase )
lowerCAmelCase_ : List[Any] = ACTaFN[activation] if activation is not None else nn.Identity()
def A ( self : Tuple , UpperCAmelCase : Tensor ):
lowerCAmelCase_ : Optional[Any] = self.convolution(UpperCAmelCase )
lowerCAmelCase_ : List[Any] = self.normalization(UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = self.activation(UpperCAmelCase )
return hidden_state
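# Shape note (not in the original): with padding=kernel_size // 2 and an odd
# kernel, a stride-1 ResNetConvLayer preserves spatial size, e.g. mapping a
# [B, in_channels, 56, 56] input to [B, out_channels, 56, 56]; stride=2 halves it.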
class __a ( nn.Module ):
def __init__( self : List[str] , UpperCAmelCase : ResNetConfig ):
super().__init__()
lowerCAmelCase_ : Union[str, Any] = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
lowerCAmelCase_ : List[Any] = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
lowerCAmelCase_ : List[Any] = config.num_channels
def A ( self : Dict , UpperCAmelCase : Tensor ):
lowerCAmelCase_ : Optional[Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
lowerCAmelCase_ : Any = self.embedder(UpperCAmelCase )
lowerCAmelCase_ : Dict = self.pooler(UpperCAmelCase )
return embedding
class __a ( nn.Module ):
def __init__( self : Dict , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int = 2 ):
super().__init__()
lowerCAmelCase_ : Union[str, Any] = nn.Convad(UpperCAmelCase , UpperCAmelCase , kernel_size=1 , stride=UpperCAmelCase , bias=UpperCAmelCase )
lowerCAmelCase_ : Tuple = nn.BatchNormad(UpperCAmelCase )
def A ( self : Optional[Any] , UpperCAmelCase : Tensor ):
lowerCAmelCase_ : Any = self.convolution(UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = self.normalization(UpperCAmelCase )
return hidden_state
class __a ( nn.Module ):
def __init__( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int = 1 , UpperCAmelCase : str = "relu" ):
super().__init__()
lowerCAmelCase_ : Optional[int] = in_channels != out_channels or stride != 1
lowerCAmelCase_ : str = (
ResNetShortCut(UpperCAmelCase , UpperCAmelCase , stride=UpperCAmelCase ) if should_apply_shortcut else nn.Identity()
)
lowerCAmelCase_ : List[Any] = nn.Sequential(
ResNetConvLayer(UpperCAmelCase , UpperCAmelCase , stride=UpperCAmelCase ) , ResNetConvLayer(UpperCAmelCase , UpperCAmelCase , activation=UpperCAmelCase ) , )
lowerCAmelCase_ : int = ACTaFN[activation]
def A ( self : List[Any] , UpperCAmelCase : str ):
lowerCAmelCase_ : List[Any] = hidden_state
lowerCAmelCase_ : List[str] = self.layer(UpperCAmelCase )
lowerCAmelCase_ : str = self.shortcut(UpperCAmelCase )
hidden_state += residual
lowerCAmelCase_ : Dict = self.activation(UpperCAmelCase )
return hidden_state
class __a ( nn.Module ):
def __init__( self : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int = 1 , UpperCAmelCase : str = "relu" , UpperCAmelCase : int = 4 ):
super().__init__()
lowerCAmelCase_ : Optional[int] = in_channels != out_channels or stride != 1
lowerCAmelCase_ : Dict = out_channels // reduction
lowerCAmelCase_ : Optional[Any] = (
ResNetShortCut(UpperCAmelCase , UpperCAmelCase , stride=UpperCAmelCase ) if should_apply_shortcut else nn.Identity()
)
lowerCAmelCase_ : List[str] = nn.Sequential(
ResNetConvLayer(UpperCAmelCase , UpperCAmelCase , kernel_size=1 ) , ResNetConvLayer(UpperCAmelCase , UpperCAmelCase , stride=UpperCAmelCase ) , ResNetConvLayer(UpperCAmelCase , UpperCAmelCase , kernel_size=1 , activation=UpperCAmelCase ) , )
lowerCAmelCase_ : List[str] = ACTaFN[activation]
def A ( self : Tuple , UpperCAmelCase : Any ):
lowerCAmelCase_ : Any = hidden_state
lowerCAmelCase_ : Optional[Any] = self.layer(UpperCAmelCase )
lowerCAmelCase_ : str = self.shortcut(UpperCAmelCase )
hidden_state += residual
lowerCAmelCase_ : Tuple = self.activation(UpperCAmelCase )
return hidden_state
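# Note (not in the original): the basic layer above stacks two 3x3 convolutions,
# while this bottleneck layer uses a 1x1 -> 3x3 -> 1x1 stack whose middle width is
# out_channels // reduction (default 4), the variant used by ResNet-50 and deeper.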
class __a ( nn.Module ):
def __init__( self : List[str] , UpperCAmelCase : ResNetConfig , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 2 , ):
super().__init__()
lowerCAmelCase_ : Dict = ResNetBottleNeckLayer if config.layer_type == """bottleneck""" else ResNetBasicLayer
lowerCAmelCase_ : Union[str, Any] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(UpperCAmelCase , UpperCAmelCase , stride=UpperCAmelCase , activation=config.hidden_act ) , *[layer(UpperCAmelCase , UpperCAmelCase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def A ( self : List[Any] , UpperCAmelCase : Tensor ):
lowerCAmelCase_ : Optional[Any] = input
for layer in self.layers:
lowerCAmelCase_ : Dict = layer(UpperCAmelCase )
return hidden_state
class __a ( nn.Module ):
def __init__( self : Dict , UpperCAmelCase : ResNetConfig ):
super().__init__()
lowerCAmelCase_ : Tuple = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowerCAmelCase_ : str = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(UpperCAmelCase , config.depths[1:] ):
self.stages.append(ResNetStage(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , depth=UpperCAmelCase ) )
def A ( self : Optional[Any] , UpperCAmelCase : Tensor , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True ):
lowerCAmelCase_ : List[str] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCAmelCase_ : List[str] = hidden_states + (hidden_state,)
lowerCAmelCase_ : Union[str, Any] = stage_module(UpperCAmelCase )
if output_hidden_states:
lowerCAmelCase_ : Dict = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=UpperCAmelCase , hidden_states=UpperCAmelCase , )
class __a ( __UpperCamelCase ):
__snake_case : Dict = ResNetConfig
__snake_case : str = """resnet"""
__snake_case : List[Any] = """pixel_values"""
__snake_case : Optional[int] = True
def A ( self : str , UpperCAmelCase : Dict ):
if isinstance(UpperCAmelCase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def A ( self : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict=False ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCAmelCase_ : str = value
__UpperCAmelCase = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__UpperCAmelCase = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare ResNet model outputting raw features without any specific head on top.""" ,__UpperCamelCase ,)
class __a ( __UpperCamelCase ):
def __init__( self : Dict , UpperCAmelCase : str ):
super().__init__(UpperCAmelCase )
lowerCAmelCase_ : Dict = config
lowerCAmelCase_ : Optional[int] = ResNetEmbeddings(UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = ResNetEncoder(UpperCAmelCase )
lowerCAmelCase_ : Any = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A ( self : List[Any] , UpperCAmelCase : Tensor , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None ):
lowerCAmelCase_ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ : List[str] = self.embedder(UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = self.encoder(
UpperCAmelCase , output_hidden_states=UpperCAmelCase , return_dict=UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = encoder_outputs[0]
lowerCAmelCase_ : Dict = self.pooler(UpperCAmelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCAmelCase , pooler_output=UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"""
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" ,__UpperCamelCase ,)
class __a ( __UpperCamelCase ):
def __init__( self : Optional[Any] , UpperCAmelCase : Optional[int] ):
super().__init__(UpperCAmelCase )
lowerCAmelCase_ : str = config.num_labels
lowerCAmelCase_ : Any = ResNetModel(UpperCAmelCase )
# classification head
lowerCAmelCase_ : Optional[Any] = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A ( self : Union[str, Any] , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[torch.LongTensor] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , ):
lowerCAmelCase_ : int = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ : str = self.resnet(UpperCAmelCase , output_hidden_states=UpperCAmelCase , return_dict=UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase_ : Any = self.classifier(UpperCAmelCase )
lowerCAmelCase_ : Any = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCAmelCase_ : int = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCAmelCase_ : Dict = """single_label_classification"""
else:
lowerCAmelCase_ : Tuple = """multi_label_classification"""
if self.config.problem_type == "regression":
lowerCAmelCase_ : Tuple = MSELoss()
if self.num_labels == 1:
lowerCAmelCase_ : Any = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowerCAmelCase_ : int = loss_fct(UpperCAmelCase , UpperCAmelCase )
elif self.config.problem_type == "single_label_classification":
lowerCAmelCase_ : Tuple = CrossEntropyLoss()
lowerCAmelCase_ : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowerCAmelCase_ : Union[str, Any] = BCEWithLogitsLoss()
lowerCAmelCase_ : Optional[Any] = loss_fct(UpperCAmelCase , UpperCAmelCase )
if not return_dict:
lowerCAmelCase_ : Any = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=UpperCAmelCase , logits=UpperCAmelCase , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"""
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
""" ,__UpperCamelCase ,)
class __a ( __UpperCamelCase ,__UpperCamelCase ):
def __init__( self : List[Any] , UpperCAmelCase : Optional[Any] ):
super().__init__(UpperCAmelCase )
super()._init_backbone(UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = [config.embedding_size] + config.hidden_sizes
lowerCAmelCase_ : List[str] = ResNetEmbeddings(UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = ResNetEncoder(UpperCAmelCase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase )
@replace_return_docstrings(output_type=UpperCAmelCase , config_class=_CONFIG_FOR_DOC )
def A ( self : List[str] , UpperCAmelCase : Tensor , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None ):
lowerCAmelCase_ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ : Union[str, Any] = self.embedder(UpperCAmelCase )
lowerCAmelCase_ : Tuple = self.encoder(UpperCAmelCase , output_hidden_states=UpperCAmelCase , return_dict=UpperCAmelCase )
lowerCAmelCase_ : int = outputs.hidden_states
lowerCAmelCase_ : Optional[int] = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
lowerCAmelCase_ : Tuple = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=UpperCAmelCase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=UpperCAmelCase , )
from __future__ import annotations
from typing import Any
class __a :
def __init__( self : Dict , UpperCAmelCase : int = 6 ):
lowerCAmelCase_ : Node | None = None
lowerCAmelCase_ : Node | None = None
self.create_linked_list(UpperCAmelCase )
def A ( self : Union[str, Any] , UpperCAmelCase : int ):
lowerCAmelCase_ : Any = Node()
lowerCAmelCase_ : int = current_node
lowerCAmelCase_ : str = current_node
lowerCAmelCase_ : Union[str, Any] = current_node
for _ in range(1 , UpperCAmelCase ):
lowerCAmelCase_ : Any = Node()
lowerCAmelCase_ : Dict = current_node
lowerCAmelCase_ : Optional[int] = previous_node
lowerCAmelCase_ : Optional[Any] = current_node
lowerCAmelCase_ : List[str] = self.front
lowerCAmelCase_ : Optional[int] = previous_node
def A ( self : Any ):
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def A ( self : List[str] ):
self.check_can_perform_operation()
return self.front.data if self.front else None
def A ( self : Optional[int] , UpperCAmelCase : Any ):
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
lowerCAmelCase_ : int = self.rear.next
if self.rear:
lowerCAmelCase_ : Union[str, Any] = data
def A ( self : List[Any] ):
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
lowerCAmelCase_ : int = self.front.data
lowerCAmelCase_ : Optional[Any] = None
return data
lowerCAmelCase_ : Optional[int] = self.front
lowerCAmelCase_ : Any = old_front.next
lowerCAmelCase_ : Tuple = old_front.data
lowerCAmelCase_ : str = None
return data
def A ( self : Tuple ):
if self.is_empty():
raise Exception("""Empty Queue""" )
def A ( self : List[str] ):
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""" )
class Node :
def __init__( self : Any ):
lowerCAmelCase_ : Any | None = None
lowerCAmelCase_ : Node | None = None
lowerCAmelCase_ : Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
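# A usage sketch (not in the original) of the circular queue class above (named
# __a in this listing), reading its uniformly renamed methods as enqueue / dequeue /
# first in declaration order; capacity and values are illustrative:
#
#     queue = __a(2)          # ring of two nodes
#     queue.enqueue("a")
#     queue.enqueue("b")
#     assert queue.dequeue() == "a"
#     assert queue.first() == "b"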
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ = logging.get_logger(__name__)
a_ = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class __SCREAMING_SNAKE_CASE ( A__ ):
snake_case_ = """deta"""
snake_case_ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Tuple , __lowercase : Optional[int]=None , __lowercase : int=9_00 , __lowercase : Optional[Any]=20_48 , __lowercase : int=6 , __lowercase : Tuple=20_48 , __lowercase : Optional[int]=8 , __lowercase : Any=6 , __lowercase : str=10_24 , __lowercase : int=8 , __lowercase : int=0.0 , __lowercase : Optional[Any]=True , __lowercase : Tuple="relu" , __lowercase : Union[str, Any]=2_56 , __lowercase : Tuple=0.1 , __lowercase : str=0.0 , __lowercase : Dict=0.0 , __lowercase : Tuple=0.02 , __lowercase : Union[str, Any]=1.0 , __lowercase : Any=True , __lowercase : Tuple=False , __lowercase : List[Any]="sine" , __lowercase : str=5 , __lowercase : List[Any]=4 , __lowercase : str=4 , __lowercase : Union[str, Any]=True , __lowercase : Optional[int]=3_00 , __lowercase : Dict=True , __lowercase : List[Any]=True , __lowercase : List[Any]=1 , __lowercase : List[str]=5 , __lowercase : int=2 , __lowercase : Dict=1 , __lowercase : str=1 , __lowercase : Optional[Any]=5 , __lowercase : Union[str, Any]=2 , __lowercase : List[str]=0.1 , __lowercase : List[Any]=0.25 , **__lowercase : Union[str, Any] , ) -> List[str]:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
SCREAMING_SNAKE_CASE__ : Dict =CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
else:
if isinstance(_a , _a ):
SCREAMING_SNAKE_CASE__ : Dict =backbone_config.pop('''model_type''' )
SCREAMING_SNAKE_CASE__ : str =CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE__ : List[str] =config_class.from_dict(_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =backbone_config
SCREAMING_SNAKE_CASE__ : Tuple =num_queries
SCREAMING_SNAKE_CASE__ : Optional[int] =max_position_embeddings
SCREAMING_SNAKE_CASE__ : int =d_model
SCREAMING_SNAKE_CASE__ : str =encoder_ffn_dim
SCREAMING_SNAKE_CASE__ : int =encoder_layers
SCREAMING_SNAKE_CASE__ : List[Any] =encoder_attention_heads
SCREAMING_SNAKE_CASE__ : Dict =decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : List[Any] =decoder_layers
SCREAMING_SNAKE_CASE__ : int =decoder_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] =dropout
SCREAMING_SNAKE_CASE__ : Any =attention_dropout
SCREAMING_SNAKE_CASE__ : Tuple =activation_dropout
SCREAMING_SNAKE_CASE__ : Dict =activation_function
SCREAMING_SNAKE_CASE__ : Union[str, Any] =init_std
SCREAMING_SNAKE_CASE__ : Tuple =init_xavier_std
SCREAMING_SNAKE_CASE__ : str =encoder_layerdrop
SCREAMING_SNAKE_CASE__ : str =auxiliary_loss
SCREAMING_SNAKE_CASE__ : Dict =position_embedding_type
# deformable attributes
SCREAMING_SNAKE_CASE__ : Optional[Any] =num_feature_levels
SCREAMING_SNAKE_CASE__ : Union[str, Any] =encoder_n_points
SCREAMING_SNAKE_CASE__ : str =decoder_n_points
SCREAMING_SNAKE_CASE__ : Optional[int] =two_stage
SCREAMING_SNAKE_CASE__ : int =two_stage_num_proposals
SCREAMING_SNAKE_CASE__ : Union[str, Any] =with_box_refine
SCREAMING_SNAKE_CASE__ : List[str] =assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
SCREAMING_SNAKE_CASE__ : Tuple =class_cost
SCREAMING_SNAKE_CASE__ : Tuple =bbox_cost
SCREAMING_SNAKE_CASE__ : Union[str, Any] =giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE__ : Dict =mask_loss_coefficient
SCREAMING_SNAKE_CASE__ : str =dice_loss_coefficient
SCREAMING_SNAKE_CASE__ : Optional[Any] =bbox_loss_coefficient
SCREAMING_SNAKE_CASE__ : List[Any] =giou_loss_coefficient
SCREAMING_SNAKE_CASE__ : Any =eos_coefficient
SCREAMING_SNAKE_CASE__ : Optional[int] =focal_alpha
super().__init__(is_encoder_decoder=_a , **_a )
@property
def __magic_name__ ( self : Dict ) -> int:
return self.encoder_attention_heads
@property
def __magic_name__ ( self : List[Any] ) -> int:
return self.d_model
def __magic_name__ ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ : List[str] =copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE__ : List[str] =self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE__ : Tuple =self.__class__.model_type
return output
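# Note (not in the original): the to_dict-style method above deep-copies the
# attribute dict, replaces the nested backbone config object with its own dict
# form, and records the class-level model_type, so the result is JSON-serializable.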
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCamelCase : int = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : List[str] , _a : List[Any] , _a : List[str]=7 , _a : List[str]=3 , _a : Tuple=18 , _a : Tuple=30 , _a : str=400 , _a : Tuple=None , _a : Union[str, Any]=True , _a : List[str]=True , _a : Optional[int]=None , ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =size if size is not None else {'height': 20, 'width': 20}
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =min_resolution
_SCREAMING_SNAKE_CASE =max_resolution
_SCREAMING_SNAKE_CASE =size
_SCREAMING_SNAKE_CASE =do_normalize
_SCREAMING_SNAKE_CASE =do_convert_rgb
_SCREAMING_SNAKE_CASE =[512, 1024, 2048, 4096]
_SCREAMING_SNAKE_CASE =patch_size if patch_size is not None else {'height': 16, 'width': 16}
def A ( self : Any ) -> List[str]:
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def A ( self : int ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE ='https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
_SCREAMING_SNAKE_CASE =Image.open(requests.get(_a , stream=_a ).raw ).convert('RGB' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class A__ ( A__ , unittest.TestCase ):
A__ = PixaStructImageProcessor if is_vision_available() else None
def A ( self : Dict ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =PixaStructImageProcessingTester(self )
@property
def A ( self : Optional[Any] ) -> int:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : Any ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , 'do_normalize' ) )
self.assertTrue(hasattr(_a , 'do_convert_rgb' ) )
def A ( self : Any ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_dummy_image()
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
_SCREAMING_SNAKE_CASE =2048
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='pt' , max_patches=_a )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.06_06 ) , atol=1e-3 , rtol=1e-3 ) )
def A ( self : Any ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =(
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
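# (Explanatory note, not in the original: the trailing +2 accounts for the row and
# column indices that Pix2Struct prepends to every flattened patch vector.)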
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processor(
_a , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def A ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =(
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
_SCREAMING_SNAKE_CASE =True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_a ):
_SCREAMING_SNAKE_CASE =image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a ).flattened_patches
_SCREAMING_SNAKE_CASE ='Hello'
_SCREAMING_SNAKE_CASE =image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a , header_text=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processor(
_a , return_tensors='pt' , max_patches=_a , header_text=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def A ( self : List[Any] ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
_SCREAMING_SNAKE_CASE =(
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processor(
_a , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def A ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE =(
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processor(
_a , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class A__ ( A__ , unittest.TestCase ):
A__ = PixaStructImageProcessor if is_vision_available() else None
def A ( self : str ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =PixaStructImageProcessingTester(self , num_channels=4 )
_SCREAMING_SNAKE_CASE =3
@property
def A ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : List[str] ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , 'do_normalize' ) )
self.assertTrue(hasattr(_a , 'do_convert_rgb' ) )
def A ( self : Dict ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =(
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processor(
_a , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCamelCase__ = "scheduler_config.json"
class __SCREAMING_SNAKE_CASE ( _a ):
snake_case : List[Any] = 1
snake_case : str = 2
snake_case : Dict = 3
snake_case : Any = 4
snake_case : str = 5
@dataclass
class __SCREAMING_SNAKE_CASE ( _a ):
snake_case : jnp.ndarray
class __SCREAMING_SNAKE_CASE :
snake_case : Optional[Any] = SCHEDULER_CONFIG_NAME
snake_case : Tuple = ["""dtype"""]
snake_case : List[str] = []
snake_case : str = True
@classmethod
def _lowerCamelCase ( cls , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase=False , **__lowerCAmelCase , ):
UpperCamelCase__ , UpperCamelCase__ = cls.load_config(
pretrained_model_name_or_path=__lowerCAmelCase , subfolder=__lowerCAmelCase , return_unused_kwargs=__lowerCAmelCase , **__lowerCAmelCase , )
UpperCamelCase__ , UpperCamelCase__ = cls.from_config(__lowerCAmelCase , return_unused_kwargs=__lowerCAmelCase , **__lowerCAmelCase )
if hasattr(__lowerCAmelCase , """create_state""" ) and getattr(__lowerCAmelCase , """has_state""" , __lowerCAmelCase ):
UpperCamelCase__ = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = False , **__lowerCAmelCase ):
self.save_config(save_directory=__lowerCAmelCase , push_to_hub=__lowerCAmelCase , **__lowerCAmelCase )
@property
def _lowerCamelCase ( self ):
return self._get_compatibles()
@classmethod
def _lowerCamelCase ( cls ):
UpperCamelCase__ = list(set([cls.__name__] + cls._compatibles ) )
UpperCamelCase__ = importlib.import_module(__name__.split(""".""" )[0] )
UpperCamelCase__ = [
getattr(__lowerCAmelCase , __lowerCAmelCase ) for c in compatible_classes_str if hasattr(__lowerCAmelCase , __lowerCAmelCase )
]
return compatible_classes
def _UpperCamelCase (a__ :jnp.ndarray , a__ :Tuple[int] ):
"""simple docstring"""
assert len(a__ ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(a__ ) - x.ndim) ) , a__ )
def _UpperCamelCase (a__ :int , a__ :str=0.999 , a__ :List[Any]=jnp.floataa ):
"""simple docstring"""
def alpha_bar(a__ :List[str] ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
UpperCamelCase__ = []
for i in range(a__ ):
UpperCamelCase__ = i / num_diffusion_timesteps
UpperCamelCase__ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(a__ ) / alpha_bar(a__ ) , a__ ) )
return jnp.array(a__ , dtype=a__ )
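# A minimal usage sketch (not in the original), with illustrative values: build a
# 1000-step cosine ("squaredcos_cap_v2") schedule and its cumulative alphas, the
# same derivation the classmethod below performs:
#
#     betas = betas_for_alpha_bar(1000)
#     alphas_cumprod = jnp.cumprod(1.0 - betas, axis=0)   # decays from ~1 toward 0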
@flax.struct.dataclass
class __SCREAMING_SNAKE_CASE :
snake_case : jnp.ndarray
snake_case : jnp.ndarray
snake_case : jnp.ndarray
@classmethod
def _lowerCamelCase ( cls , __lowerCAmelCase ):
UpperCamelCase__ = scheduler.config
if config.trained_betas is not None:
UpperCamelCase__ = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
UpperCamelCase__ = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCamelCase__ = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCamelCase__ = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )
UpperCamelCase__ = 1.0 - betas
UpperCamelCase__ = jnp.cumprod(__lowerCAmelCase , axis=0 )
return cls(
alphas=__lowerCAmelCase , betas=__lowerCAmelCase , alphas_cumprod=__lowerCAmelCase , )
def _UpperCamelCase (a__ :CommonSchedulerState , a__ :jnp.ndarray , a__ :jnp.ndarray , a__ :jnp.ndarray ):
"""simple docstring"""
UpperCamelCase__ = state.alphas_cumprod
UpperCamelCase__ = alphas_cumprod[timesteps] ** 0.5
UpperCamelCase__ = sqrt_alpha_prod.flatten()
UpperCamelCase__ = broadcast_to_shape_from_left(a__ , original_samples.shape )
UpperCamelCase__ = (1 - alphas_cumprod[timesteps]) ** 0.5
UpperCamelCase__ = sqrt_one_minus_alpha_prod.flatten()
UpperCamelCase__ = broadcast_to_shape_from_left(a__ , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def _UpperCamelCase (a__ :CommonSchedulerState , a__ :jnp.ndarray , a__ :jnp.ndarray , a__ :jnp.ndarray ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = get_sqrt_alpha_prod(a__ , a__ , a__ , a__ )
UpperCamelCase__ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def _UpperCamelCase (a__ :CommonSchedulerState , a__ :jnp.ndarray , a__ :jnp.ndarray , a__ :jnp.ndarray ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = get_sqrt_alpha_prod(a__ , a__ , a__ , a__ )
UpperCamelCase__ = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
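# Note (not in the original): the two helpers above implement the forward
# diffusion q(x_t | x_0) = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
# and its v-prediction counterpart
# v_t = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0.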
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=99 , __lowerCAmelCase=64 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=16 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ):
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = scope
UpperCamelCase__ = vocab_size - 1
def _lowerCamelCase ( self ):
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowerCamelCase ( self ):
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ = True
return config, input_ids, input_mask, token_labels
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = GPTNeoXModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
UpperCamelCase__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = True
UpperCamelCase__ = GPTNeoXModel(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = GPTNeoXForCausalLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = GPTNeoXForQuestionAnswering(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = GPTNeoXForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = GPTNeoXForTokenClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = True
UpperCamelCase__ = GPTNeoXForCausalLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
# first forward pass
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , use_cache=__lowerCAmelCase )
UpperCamelCase__ = outputs.past_key_values
# create hypothetical multiple next tokens and extend next_input_ids
UpperCamelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append the new tokens to input_ids and the new mask to the attention mask
UpperCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
UpperCamelCase__ = output_from_no_past["""hidden_states"""][0]
UpperCamelCase__ = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , )["""hidden_states"""][0]
# select random slice
UpperCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _a , _a , _a , unittest.TestCase ):
snake_case : Optional[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case : Union[str, Any] = (GPTNeoXForCausalLM,) if is_torch_available() else ()
snake_case : Dict = (
{
"""feature-extraction""": GPTNeoXModel,
"""question-answering""": GPTNeoXForQuestionAnswering,
"""text-classification""": GPTNeoXForSequenceClassification,
"""text-generation""": GPTNeoXForCausalLM,
"""token-classification""": GPTNeoXForTokenClassification,
"""zero-shot""": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case : Tuple = False
snake_case : Dict = False
snake_case : Tuple = False
snake_case : Any = False
def _lowerCamelCase ( self ):
UpperCamelCase__ = GPTNeoXModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=64 , num_attention_heads=8 )
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self ):
# This regression test was failing with PyTorch < 1.3
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase__ = None
self.model_tester.create_and_check_model_as_decoder(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def _lowerCamelCase ( self ):
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowerCamelCase ( self , __lowerCAmelCase ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = ids_tensor([1, 10] , config.vocab_size )
UpperCamelCase__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase__ = GPTNeoXModel(__lowerCAmelCase )
original_model.to(__lowerCAmelCase )
original_model.eval()
UpperCamelCase__ = original_model(__lowerCAmelCase ).last_hidden_state
UpperCamelCase__ = original_model(__lowerCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase__ = {"""type""": scaling_type, """factor""": 10.0}
UpperCamelCase__ = GPTNeoXModel(__lowerCAmelCase )
scaled_model.to(__lowerCAmelCase )
scaled_model.eval()
UpperCamelCase__ = scaled_model(__lowerCAmelCase ).last_hidden_state
UpperCamelCase__ = scaled_model(__lowerCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-5 ) )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self ):
UpperCamelCase__ = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
UpperCamelCase__ = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(__lowerCAmelCase )
UpperCamelCase__ = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__lowerCAmelCase )
# The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
UpperCamelCase__ = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
UpperCamelCase__ = model.generate(**__lowerCAmelCase , do_sample=__lowerCAmelCase , max_new_tokens=20 )
UpperCamelCase__ = tokenizer.batch_decode(__lowerCAmelCase )[0]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
| 87 | 0 |
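# A minimal sketch of the cached vs. uncached decoding equivalence that the
# decoder-past test above checks. The tiny config values are illustrative
# assumptions chosen so the model builds quickly on CPU; only GPTNeoXConfig and
# GPTNeoXForCausalLM are taken from the code above.
import torch
from transformers import GPTNeoXConfig, GPTNeoXForCausalLM

config = GPTNeoXConfig(
    vocab_size=128, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=64, max_position_embeddings=64,
)
model = GPTNeoXForCausalLM(config).eval()
input_ids = torch.randint(0, config.vocab_size, (1, 8))
with torch.no_grad():
    past = model(input_ids, use_cache=True).past_key_values
    next_ids = torch.randint(0, config.vocab_size, (1, 3))
    # run only the new tokens against the cache, and the full sequence without it
    with_cache = model(next_ids, past_key_values=past).logits
    no_cache = model(torch.cat([input_ids, next_ids], dim=-1)).logits[:, -3:]
assert torch.allclose(with_cache, no_cache, atol=1e-3)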
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class snake_case ( SCREAMING_SNAKE_CASE_ ):
# to overwrite in feature-extractor-specific tests
a_ : int = None
a_ : Tuple = None
@property
def UpperCAmelCase__ ( self) ->str:
return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(__UpperCAmelCase , "feature_size"))
self.assertTrue(hasattr(__UpperCAmelCase , "sampling_rate"))
self.assertTrue(hasattr(__UpperCAmelCase , "padding_value"))
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.feat_extract_tester.prepare_inputs_for_common()
a_ = self.feature_extraction_class(**self.feat_extract_dict)
a_ = feat_extract.model_input_names[0]
a_ = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(__UpperCAmelCase) == len(__UpperCAmelCase) for x, y in zip(__UpperCAmelCase , processed_features[input_name])))
a_ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__UpperCAmelCase)
a_ = BatchFeature({input_name: speech_inputs} , tensor_type="np")
a_ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
a_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__UpperCAmelCase)
a_ = self.feature_extraction_class(**self.feat_extract_dict)
a_ = feat_extract.model_input_names[0]
a_ = BatchFeature({input_name: speech_inputs} , tensor_type="pt")
a_ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
a_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
def UpperCAmelCase__ ( self) ->Any:
a_ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__UpperCAmelCase)
a_ = self.feature_extraction_class(**self.feat_extract_dict)
a_ = feat_extract.model_input_names[0]
a_ = BatchFeature({input_name: speech_inputs} , tensor_type="tf")
a_ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
a_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase=False) ->Tuple:
def _inputs_have_equal_length(__UpperCAmelCase):
a_ = len(input[0])
for input_slice in input[1:]:
if len(__UpperCAmelCase) != length:
return False
return True
def _inputs_are_equal(__UpperCAmelCase , __UpperCAmelCase):
if len(__UpperCAmelCase) != len(__UpperCAmelCase):
return False
for input_slice_a, input_slice_a in zip(__UpperCAmelCase , __UpperCAmelCase):
if not np.allclose(np.asarray(__UpperCAmelCase) , np.asarray(__UpperCAmelCase) , atol=1E-3):
return False
return True
a_ = self.feature_extraction_class(**self.feat_extract_dict)
a_ = self.feat_extract_tester.prepare_inputs_for_common(numpify=__UpperCAmelCase)
a_ = feat_extract.model_input_names[0]
a_ = BatchFeature({input_name: speech_inputs})
a_ = self.feat_extract_tester.seq_length_diff
a_ = self.feat_extract_tester.max_seq_length + pad_diff
a_ = self.feat_extract_tester.min_seq_length
a_ = self.feat_extract_tester.batch_size
a_ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
a_ = feat_extract.pad(__UpperCAmelCase , padding=__UpperCAmelCase)
a_ = input_a[input_name]
a_ = feat_extract.pad(__UpperCAmelCase , padding="longest")
a_ = input_a[input_name]
a_ = feat_extract.pad(__UpperCAmelCase , padding="max_length" , max_length=len(speech_inputs[-1]))
a_ = input_a[input_name]
a_ = feat_extract.pad(__UpperCAmelCase , padding="longest" , return_tensors="np")
a_ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(__UpperCAmelCase):
feat_extract.pad(__UpperCAmelCase , padding="max_length")[input_name]
a_ = feat_extract.pad(
__UpperCAmelCase , padding="max_length" , max_length=__UpperCAmelCase , return_tensors="np")
a_ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(__UpperCAmelCase))
self.assertTrue(_inputs_have_equal_length(__UpperCAmelCase))
self.assertTrue(_inputs_have_equal_length(__UpperCAmelCase))
self.assertTrue(_inputs_are_equal(__UpperCAmelCase , __UpperCAmelCase))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
a_ = feat_extract.pad(__UpperCAmelCase , pad_to_multiple_of=10)
a_ = input_a[input_name]
a_ = feat_extract.pad(__UpperCAmelCase , padding="longest" , pad_to_multiple_of=10)
a_ = input_a[input_name]
a_ = feat_extract.pad(
__UpperCAmelCase , padding="max_length" , pad_to_multiple_of=10 , max_length=__UpperCAmelCase)
a_ = input_a[input_name]
a_ = feat_extract.pad(
__UpperCAmelCase , padding="max_length" , pad_to_multiple_of=10 , max_length=__UpperCAmelCase , return_tensors="np" , )
a_ = input_a[input_name]
self.assertTrue(all(len(__UpperCAmelCase) % 10 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(__UpperCAmelCase , __UpperCAmelCase))
a_ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(__UpperCAmelCase) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
a_ = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1E-3)
def UpperCAmelCase__ ( self , __UpperCAmelCase=False) ->List[Any]:
def _inputs_have_equal_length(__UpperCAmelCase):
a_ = len(input[0])
for input_slice in input[1:]:
if len(__UpperCAmelCase) != length:
return False
return True
def _inputs_are_equal(__UpperCAmelCase , __UpperCAmelCase):
if len(__UpperCAmelCase) != len(__UpperCAmelCase):
return False
for input_slice_a, input_slice_a in zip(__UpperCAmelCase , __UpperCAmelCase):
if not np.allclose(np.asarray(__UpperCAmelCase) , np.asarray(__UpperCAmelCase) , atol=1E-3):
return False
return True
a_ = self.feature_extraction_class(**self.feat_extract_dict)
a_ = self.feat_extract_tester.prepare_inputs_for_common(numpify=__UpperCAmelCase)
a_ = feat_extract.model_input_names[0]
a_ = BatchFeature({input_name: speech_inputs})
# truncate to smallest
a_ = feat_extract.pad(
__UpperCAmelCase , padding="max_length" , max_length=len(speech_inputs[0]) , truncation=__UpperCAmelCase)
a_ = input_a[input_name]
a_ = feat_extract.pad(__UpperCAmelCase , padding="max_length" , max_length=len(speech_inputs[0]))
a_ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__UpperCAmelCase))
self.assertFalse(_inputs_have_equal_length(__UpperCAmelCase))
# truncate to smallest with np
a_ = feat_extract.pad(
__UpperCAmelCase , padding="max_length" , max_length=len(speech_inputs[0]) , return_tensors="np" , truncation=__UpperCAmelCase , )
a_ = input_a[input_name]
a_ = feat_extract.pad(
__UpperCAmelCase , padding="max_length" , max_length=len(speech_inputs[0]) , return_tensors="np")
a_ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__UpperCAmelCase))
self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(__UpperCAmelCase))
# truncate to middle
a_ = feat_extract.pad(
__UpperCAmelCase , padding="max_length" , max_length=len(speech_inputs[1]) , truncation=__UpperCAmelCase , return_tensors="np" , )
a_ = input_a[input_name]
a_ = feat_extract.pad(
__UpperCAmelCase , padding="max_length" , max_length=len(speech_inputs[1]) , truncation=__UpperCAmelCase)
a_ = input_a[input_name]
a_ = feat_extract.pad(
__UpperCAmelCase , padding="max_length" , max_length=len(speech_inputs[1]) , return_tensors="np")
a_ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(__UpperCAmelCase))
self.assertTrue(_inputs_have_equal_length(__UpperCAmelCase))
self.assertTrue(_inputs_are_equal(__UpperCAmelCase , __UpperCAmelCase))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(__UpperCAmelCase))
self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__UpperCAmelCase):
feat_extract.pad(__UpperCAmelCase , truncation=__UpperCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__UpperCAmelCase):
feat_extract.pad(__UpperCAmelCase , padding="longest" , truncation=__UpperCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__UpperCAmelCase):
feat_extract.pad(__UpperCAmelCase , padding="longest" , truncation=__UpperCAmelCase)[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(__UpperCAmelCase):
feat_extract.pad(__UpperCAmelCase , padding="max_length" , truncation=__UpperCAmelCase)[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
a_ = 12
a_ = feat_extract.pad(
__UpperCAmelCase , padding="max_length" , max_length=len(speech_inputs[0]) , pad_to_multiple_of=__UpperCAmelCase , truncation=__UpperCAmelCase , )
a_ = input_a[input_name]
a_ = feat_extract.pad(
__UpperCAmelCase , padding="max_length" , max_length=len(speech_inputs[0]) , pad_to_multiple_of=__UpperCAmelCase , )
a_ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
a_ = len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
a_ = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(__UpperCAmelCase))
self.assertFalse(_inputs_have_equal_length(__UpperCAmelCase))
def UpperCAmelCase__ ( self) ->Tuple:
self._check_padding(numpify=__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Any:
self._check_padding(numpify=__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->str:
self._check_truncation(numpify=__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->int:
self._check_truncation(numpify=__UpperCAmelCase)
@require_torch
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = self.feature_extraction_class(**self.feat_extract_dict)
a_ = self.feat_extract_tester.prepare_inputs_for_common()
a_ = feat_extract.model_input_names[0]
a_ = BatchFeature({input_name: speech_inputs})
a_ = feat_extract.pad(__UpperCAmelCase , padding="longest" , return_tensors="np")[input_name]
a_ = feat_extract.pad(__UpperCAmelCase , padding="longest" , return_tensors="pt")[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1E-2)
@require_tf
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.feature_extraction_class(**self.feat_extract_dict)
a_ = self.feat_extract_tester.prepare_inputs_for_common()
a_ = feat_extract.model_input_names[0]
a_ = BatchFeature({input_name: speech_inputs})
a_ = feat_extract.pad(__UpperCAmelCase , padding="longest" , return_tensors="np")[input_name]
a_ = feat_extract.pad(__UpperCAmelCase , padding="longest" , return_tensors="tf")[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1E-2)
def UpperCAmelCase__ ( self) ->str:
a_ = self.feat_extract_dict
a_ = True
a_ = self.feature_extraction_class(**__UpperCAmelCase)
a_ = self.feat_extract_tester.prepare_inputs_for_common()
a_ = [len(__UpperCAmelCase) for x in speech_inputs]
a_ = feat_extract.model_input_names[0]
a_ = BatchFeature({input_name: speech_inputs})
a_ = feat_extract.pad(__UpperCAmelCase , padding="longest" , return_tensors="np")
self.assertIn("attention_mask" , __UpperCAmelCase)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = self.feat_extract_dict
a_ = True
a_ = self.feature_extraction_class(**__UpperCAmelCase)
a_ = self.feat_extract_tester.prepare_inputs_for_common()
a_ = [len(__UpperCAmelCase) for x in speech_inputs]
a_ = feat_extract.model_input_names[0]
a_ = BatchFeature({input_name: speech_inputs})
a_ = min(__UpperCAmelCase)
a_ = feat_extract.pad(
__UpperCAmelCase , padding="max_length" , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="np")
self.assertIn("attention_mask" , __UpperCAmelCase)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs]) | 243 |
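# A small standalone helper (illustrative, not from the mixin above) that
# reproduces the pad_to_multiple_of arithmetic asserted in the padding and
# truncation checks: pad up to the smallest multiple of `multiple` that fits
# the longest sequence.
def expected_padded_length(max_len: int, multiple: int) -> int:
    if max_len % multiple == 0:
        return max_len
    return (max_len // multiple + 1) * multiple

assert expected_padded_length(37, 10) == 40
assert expected_padded_length(40, 10) == 40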
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
lowerCAmelCase__ : Optional[Any] = args.log_outputs
lowerCAmelCase__ : Union[str, Any] = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
lowerCAmelCase__ : Dict = load_metric('wer' )
lowerCAmelCase__ : Tuple = load_metric('cer' )
# compute metrics
lowerCAmelCase__ : Dict = wer.compute(references=result['target'] , predictions=result['prediction'] )
lowerCAmelCase__ : int = cer.compute(references=result['target'] , predictions=result['prediction'] )
# print & log results
lowerCAmelCase__ : Optional[int] = F'''WER: {wer_result}\nCER: {cer_result}'''
print(SCREAMING_SNAKE_CASE_ )
with open(F'''{dataset_id}_eval_results.txt''' , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
lowerCAmelCase__ : List[str] = F'''log_{dataset_id}_predictions.txt'''
lowerCAmelCase__ : Union[str, Any] = F'''log_{dataset_id}_targets.txt'''
with open(SCREAMING_SNAKE_CASE_ , 'w' ) as p, open(SCREAMING_SNAKE_CASE_ , 'w' ) as t:
# mapping function to write output
def write_to_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
p.write(F'''{i}''' + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(F'''{i}''' + '\n' )
t.write(batch['target'] + '\n' )
result.map(SCREAMING_SNAKE_CASE_ , with_indices=SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
lowerCAmelCase__ : List[str] = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
lowerCAmelCase__ : Union[str, Any] = re.sub(SCREAMING_SNAKE_CASE_ , '' , text.lower() )
# In addition, we can normalize the target text, e.g. removing newline characters, etc.
# note that order is important here!
lowerCAmelCase__ : List[Any] = ['\n\n', '\n', '   ', '  ']
for t in token_sequences_to_ignore:
lowerCAmelCase__ : Optional[int] = ' '.join(text.split(SCREAMING_SNAKE_CASE_ ) )
return text
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
# load dataset
lowerCAmelCase__ : Tuple = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=SCREAMING_SNAKE_CASE_ )
# for testing: only process the first few examples
# dataset = dataset.select(range(10))
# load processor
lowerCAmelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(args.model_id )
lowerCAmelCase__ : Union[str, Any] = feature_extractor.sampling_rate
# resample audio
lowerCAmelCase__ : Union[str, Any] = dataset.cast_column('audio' , Audio(sampling_rate=SCREAMING_SNAKE_CASE_ ) )
# load eval pipeline
if args.device is None:
lowerCAmelCase__ : List[Any] = 0 if torch.cuda.is_available() else -1
lowerCAmelCase__ : List[Any] = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Optional[int] = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
lowerCAmelCase__ : int = prediction['text']
lowerCAmelCase__ : Optional[int] = normalize_text(batch['sentence'] )
return batch
# run inference on all examples
lowerCAmelCase__ : Dict = dataset.map(SCREAMING_SNAKE_CASE_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
lowerCamelCase__ = parser.parse_args()
main(args) | 212 | 0 |
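# A dependency-free sketch of the word error rate that load_metric('wer')
# computes above (illustrative; the actual metric delegates to jiwer): WER is
# word-level edit distance divided by the reference length.
def word_error_rate(reference: str, prediction: str) -> float:
    ref, hyp = reference.split(), prediction.split()
    # classic dynamic-programming edit distance over words
    d = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        d[i][0] = i
    for j in range(len(hyp) + 1):
        d[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            d[i][j] = min(d[i - 1][j] + 1, d[i][j - 1] + 1, d[i - 1][j - 1] + cost)
    return d[len(ref)][len(hyp)] / max(len(ref), 1)

assert abs(word_error_rate("my favorite food is pizza", "my favourite food is pizza") - 0.2) < 1e-9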
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
__lowercase: Dict = True
except (ImportError, AttributeError):
__lowercase: Any = object
def SCREAMING_SNAKE_CASE__( *_UpperCamelCase : Union[str, Any] , **_UpperCamelCase : str ) -> List[Any]:
'''simple docstring'''
pass
__lowercase: Optional[int] = False
__lowercase: int = logging.get_logger("transformers-cli/serving")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Namespace ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(lowerCamelCase__ , args.host , args.port , args.workers )
class UpperCAmelCase ( a_):
_lowerCamelCase : dict
class UpperCAmelCase ( a_):
_lowerCamelCase : List[str]
_lowerCamelCase : Optional[List[int]]
class UpperCAmelCase ( a_):
_lowerCamelCase : str
class UpperCAmelCase ( a_):
_lowerCamelCase : Any
class UpperCAmelCase ( a_):
@staticmethod
def lowercase_ ( a_ : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = parser.add_parser(
"serve", help="CLI tool to run inference requests through REST and GraphQL endpoints." )
serve_parser.add_argument(
"--task", type=a_, choices=get_supported_tasks(), help="The task to run the pipeline on", )
serve_parser.add_argument("--host", type=a_, default="localhost", help="Interface the server will listen on." )
serve_parser.add_argument("--port", type=a_, default=8888, help="Port the serving will listen to." )
serve_parser.add_argument("--workers", type=a_, default=1, help="Number of http workers" )
serve_parser.add_argument("--model", type=a_, help="Model's name or path to stored model." )
serve_parser.add_argument("--config", type=a_, help="Model's config name or path to stored model." )
serve_parser.add_argument("--tokenizer", type=a_, help="Tokenizer name to use." )
serve_parser.add_argument(
"--device", type=a_, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)", )
serve_parser.set_defaults(func=a_ )
def __init__( self : Tuple, a_ : Any, a_ : Union[str, Any], a_ : int, a_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = pipeline
UpperCamelCase__ = host
UpperCamelCase__ = port
UpperCamelCase__ = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"Using serve command requires FastAPI and uvicorn. "
"Please install transformers with [serving]: pip install \"transformers[serving]\"."
"Or install FastAPI and uvicorn separately." )
else:
logger.info(f'Serving model over {host}:{port}' )
UpperCamelCase__ = FastAPI(
routes=[
APIRoute(
"/", self.model_info, response_model=a_, response_class=a_, methods=["GET"], ),
APIRoute(
"/tokenize", self.tokenize, response_model=a_, response_class=a_, methods=["POST"], ),
APIRoute(
"/detokenize", self.detokenize, response_model=a_, response_class=a_, methods=["POST"], ),
APIRoute(
"/forward", self.forward, response_model=a_, response_class=a_, methods=["POST"], ),
], timeout=600, )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
run(self._app, host=self.host, port=self.port, workers=self.workers )
def lowercase_ ( self : str ):
"""simple docstring"""
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def lowercase_ ( self : Optional[Any], a_ : Tuple = Body(a_, embed=a_ ), a_ : str = Body(a_, embed=a_ ) ):
"""simple docstring"""
try:
UpperCamelCase__ = self._pipeline.tokenizer.tokenize(a_ )
if return_ids:
UpperCamelCase__ = self._pipeline.tokenizer.convert_tokens_to_ids(a_ )
return ServeTokenizeResult(tokens=a_, tokens_ids=a_ )
else:
return ServeTokenizeResult(tokens=a_ )
except Exception as e:
raise HTTPException(status_code=500, detail={"model": "", "error": str(a_ )} )
def lowercase_ ( self : Optional[Any], a_ : int = Body(a_, embed=a_ ), a_ : Optional[int] = Body(a_, embed=a_ ), a_ : Any = Body(a_, embed=a_ ), ):
"""simple docstring"""
try:
UpperCamelCase__ = self._pipeline.tokenizer.decode(a_, a_, a_ )
return ServeDeTokenizeResult(model="", text=a_ )
except Exception as e:
raise HTTPException(status_code=500, detail={"model": "", "error": str(a_ )} )
async def lowercase_ ( self : Dict, a_ : int=Body(a_, embed=a_ ) ):
"""simple docstring"""
if len(a_ ) == 0:
return ServeForwardResult(output=[], attention=[] )
try:
# Forward through the model
UpperCamelCase__ = self._pipeline(a_ )
return ServeForwardResult(output=a_ )
except Exception as e:
raise HTTPException(500, {"error": str(a_ )} ) | 371 |
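# A stripped-down sketch of the route-registration pattern ServeCommand uses
# above: endpoints are plain callables wired through fastapi.routing.APIRoute
# rather than decorators. The endpoint body, host, and port are illustrative
# assumptions; fastapi and uvicorn are assumed to be installed.
from fastapi import FastAPI
from fastapi.routing import APIRoute

def model_info() -> dict:
    return {"model": "demo", "task": "text-classification"}

app = FastAPI(routes=[APIRoute("/", model_info, methods=["GET"])])

if __name__ == "__main__":
    from uvicorn import run
    run(app, host="localhost", port=8888)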
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = ['torch', 'scipy']
def __init__( self : List[str], *a_ : Optional[int], **a_ : int ):
"""simple docstring"""
requires_backends(self, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Dict, *a_ : Tuple, **a_ : Dict ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Optional[Any], *a_ : List[Any], **a_ : Any ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] ) | 31 | 0 |
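# A simplified re-implementation (an assumption, modeled on transformers.utils)
# of the requires_backends guard the dummy class above relies on: touching the
# placeholder raises a helpful ImportError naming the missing backends.
import importlib.util

def requires_backends_sketch(obj, backends):
    name = getattr(obj, "__name__", obj.__class__.__name__)
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")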
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :str = parent
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return {}
def a ( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
UpperCamelCase__ :int = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
return [html_string_a, html_string_a]
@require_bsa
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = MarkupLMFeatureExtractor if is_bsa_available() else None
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = MarkupLMFeatureExtractionTester(self )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.feature_extract_tester.prepare_feat_extract_dict()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = self.feature_extraction_class()
# Test not batched input
UpperCamelCase__ :int = get_html_strings()[0]
UpperCamelCase__ :int = feature_extractor(UpperCamelCase_ )
# fmt: off
UpperCamelCase__ :Union[str, Any] = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
UpperCamelCase__ :Optional[Any] = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
# fmt: on
self.assertEqual(encoding.nodes , UpperCamelCase_ )
self.assertEqual(encoding.xpaths , UpperCamelCase_ )
# Test batched
UpperCamelCase__ :str = get_html_strings()
UpperCamelCase__ :Optional[Any] = feature_extractor(UpperCamelCase_ )
# fmt: off
UpperCamelCase__ :List[str] = expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
UpperCamelCase__ :int = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , UpperCamelCase_ )
self.assertEqual(encoding.xpaths , UpperCamelCase_ ) | 97 |
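# A hedged sketch of the idea behind the feature extractor tested above: walk
# up the BeautifulSoup parse tree (the bs4 backend gated above) and build an
# xpath for a node. This is a simplification, not MarkupLMFeatureExtractor's
# exact logic.
from bs4 import BeautifulSoup

def xpath_of(tag) -> str:
    parts = []
    while tag is not None and tag.name not in (None, "[document]"):
        siblings = tag.parent.find_all(tag.name, recursive=False)
        parts.append(tag.name if len(siblings) == 1 else f"{tag.name}[{siblings.index(tag) + 1}]")
        tag = tag.parent
    return "/" + "/".join(reversed(parts))

soup = BeautifulSoup("<html><body><h1>Hi</h1><p>One</p><p>Two</p></body></html>", "html.parser")
assert xpath_of(soup.find_all("p")[1]) == "/html/body/p[2]"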
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase: List[Any] = logging.get_logger(__name__)
_UpperCamelCase: int = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class a__ ( SCREAMING_SNAKE_CASE__ ):
_lowerCamelCase = 'megatron-bert'
def __init__( self : int, lowerCAmelCase : List[Any]=29056, lowerCAmelCase : int=1024, lowerCAmelCase : List[str]=24, lowerCAmelCase : Union[str, Any]=16, lowerCAmelCase : Union[str, Any]=4096, lowerCAmelCase : Dict="gelu", lowerCAmelCase : List[str]=0.1, lowerCAmelCase : Any=0.1, lowerCAmelCase : str=512, lowerCAmelCase : str=2, lowerCAmelCase : Any=0.02, lowerCAmelCase : Any=1e-12, lowerCAmelCase : List[str]=0, lowerCAmelCase : List[str]="absolute", lowerCAmelCase : Any=True, **lowerCAmelCase : Union[str, Any], ) -> Tuple:
super().__init__(pad_token_id=lowerCAmelCase, **lowerCAmelCase )
lowercase : Tuple = vocab_size
lowercase : Any = hidden_size
lowercase : int = num_hidden_layers
lowercase : Optional[int] = num_attention_heads
lowercase : Optional[int] = hidden_act
lowercase : Optional[int] = intermediate_size
lowercase : List[Any] = hidden_dropout_prob
lowercase : Union[str, Any] = attention_probs_dropout_prob
lowercase : Optional[int] = max_position_embeddings
lowercase : Optional[int] = type_vocab_size
lowercase : Any = initializer_range
lowercase : Any = layer_norm_eps
lowercase : Optional[int] = position_embedding_type
lowercase : Optional[int] = use_cache
| 255 | 0 |
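# A short usage sketch for the config class above (obfuscated here to `a__`):
# constructor arguments become attributes and round-trip through to_dict().
# The keyword names follow the un-obfuscated MegatronBertConfig signature and
# the small sizes are illustrative assumptions.
cfg = a__(vocab_size=1024, hidden_size=64, num_hidden_layers=2, num_attention_heads=4)
assert cfg.hidden_size == 64 and cfg.to_dict()["vocab_size"] == 1024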
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__A : List[Any] = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[Any] = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
__A : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 360 |
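# A minimal sketch of the lazy-import mechanism behind _LazyModule above (a
# simplified assumption, not the real implementation): attribute access
# triggers the backing import and caches the result on the module object.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module

    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value

demo = LazyModuleSketch("demo", {"sqrt": "math"})
assert demo.sqrt(9) == 3.0  # "math" is imported only on first access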
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__A : List[Any] = trt.Logger(trt.Logger.WARNING)
__A : Optional[Any] = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__A : List[Any] = logging.getLogger(__name__)
__A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''Number of workers to use for the preprocessing.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
__A : List[str] = parser.parse_args()
if args.tokenizer_name:
__A : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
__A : List[Any] = args.per_device_eval_batch_size
__A : Any = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__A : Any = True
__A : Union[str, Any] = '''temp_engine/bert-fp32.engine'''
if args.fpaa:
__A : List[str] = '''temp_engine/bert-fp16.engine'''
if args.inta:
__A : Dict = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
__A : Optional[int] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__A : str = [network.get_input(i) for i in range(network.num_inputs)]
__A : Any = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__A : Dict = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__A : List[Any] = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__A : Union[str, Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : Dict = np.asarray(inputs['input_ids'], dtype=np.intaa )
lowerCAmelCase : Optional[int] = np.asarray(inputs['attention_mask'], dtype=np.intaa )
lowerCAmelCase : Dict = np.asarray(inputs['token_type_ids'], dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), _UpperCAmelCase )
cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), _UpperCAmelCase )
cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), _UpperCAmelCase )
# start time
lowerCAmelCase : List[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(_UpperCAmelCase ) for d_inp in d_inputs] + [int(_UpperCAmelCase ), int(_UpperCAmelCase )], stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
cuda.memcpy_dtoh_async(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
# Synchronize the stream and take time
stream.synchronize()
# end time
lowerCAmelCase : List[str] = time.time()
lowerCAmelCase : Tuple = end_time - start_time
lowerCAmelCase : Union[str, Any] = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
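# model_infer above follows the standard TensorRT async pattern: host-to-device
# copies (memcpy_htod_async), kernel enqueue (execute_async), device-to-host
# copies (memcpy_dtoh_async), then stream.synchronize(), so the timed interval
# covers the full round trip rather than just the enqueue call.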
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__A : List[str] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__A : Union[str, Any] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
__A : int = raw_datasets['''validation'''].column_names
__A : int = '''question''' if '''question''' in column_names else column_names[0]
__A : List[str] = '''context''' if '''context''' in column_names else column_names[1]
__A : int = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__A : str = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
)
__A : Union[str, Any] = min(args.max_seq_length, tokenizer.model_max_length)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : Any = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
lowerCAmelCase : Union[str, Any] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation='only_second' if pad_on_right else 'only_first', max_length=_UpperCAmelCase, stride=args.doc_stride, return_overflowing_tokens=_UpperCAmelCase, return_offsets_mapping=_UpperCAmelCase, padding='max_length', )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
lowerCAmelCase : List[Any] = tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
lowerCAmelCase : Tuple = []
for i in range(len(tokenized_examples['input_ids'] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
lowerCAmelCase : Optional[Any] = tokenized_examples.sequence_ids(_UpperCAmelCase )
lowerCAmelCase : Optional[int] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
lowerCAmelCase : List[str] = sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
lowerCAmelCase : List[Any] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
__A : int = raw_datasets['''validation''']
# Validation Feature Creation
__A : Any = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
__A : List[str] = default_data_collator
__A : int = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
__A : Union[str, Any] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase="eval" ) -> int:
'''simple docstring'''
lowerCAmelCase : str = postprocess_qa_predictions(
examples=_UpperCAmelCase, features=_UpperCAmelCase, predictions=_UpperCAmelCase, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=_UpperCAmelCase, )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowerCAmelCase : Union[str, Any] = [
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
lowerCAmelCase : List[Any] = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
lowerCAmelCase : Optional[Any] = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=_UpperCAmelCase, label_ids=_UpperCAmelCase )
__A : List[Any] = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return trt.volume(engine.get_binding_shape(_UpperCAmelCase ) ) * engine.get_binding_dtype(_UpperCAmelCase ).itemsize
# Allocate device memory for inputs and outputs.
__A : List[str] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__A : Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__A : int = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__A : Tuple = cuda.mem_alloc(h_outputa.nbytes)
__A : Tuple = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__A : Union[str, Any] = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(F' Num examples = {len(eval_dataset)}')
logger.info(F' Batch size = {args.per_device_eval_batch_size}')
__A : Union[str, Any] = 0.0
__A : Optional[Any] = 0
__A : Optional[Any] = timeit.default_timer()
__A : Optional[int] = None
for step, batch in enumerate(eval_dataloader):
__A , __A : str = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__A , __A : str = outputs
__A : Optional[Any] = torch.tensor(start_logits)
__A : Any = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__A : List[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__A : int = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__A : Union[str, Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__A : int = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__A : str = nested_truncate(all_preds, len(eval_dataset))
__A : Any = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
logger.info('''Total Number of Inference = %d''', niter)
__A : List[Any] = post_processing_function(eval_examples, eval_dataset, all_preds)
__A : str = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F'Evaluation metrics: {eval_metric}')
| 323 | 0 |
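# An illustrative, simplified version of what postprocess_qa_predictions does
# with the start/end logits gathered above (the real helper also handles
# n-best files, null answers, and per-example feature grouping): pick the
# highest-scoring valid (start, end) pair and map it back to context
# characters via the offset mapping. Names and limits here are assumptions.
import numpy as np

def best_span(start_logits, end_logits, offsets, context, n_best=20, max_answer_length=30):
    starts = np.argsort(start_logits)[::-1][:n_best]
    ends = np.argsort(end_logits)[::-1][:n_best]
    best, best_score = "", -np.inf
    for s in starts:
        for e in ends:
            if offsets[s] is None or offsets[e] is None:
                continue  # token belongs to the question; masked to None upstream
            if e < s or e - s + 1 > max_answer_length:
                continue
            score = start_logits[s] + end_logits[e]
            if score > best_score:
                best_score, best = score, context[offsets[s][0] : offsets[e][1]]
    return best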
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
a : Union[str, Any] = logging.getLogger(__name__)
def _SCREAMING_SNAKE_CASE ( _lowercase : torch.nn.Module , _lowercase : BnbQuantizationConfig , _lowercase : Union[str, os.PathLike] = None , _lowercase : Optional[Dict[str, Union[int, str, torch.device]]] = None , _lowercase : Optional[List[str]] = None , _lowercase : Optional[Dict[Union[int, str], Union[int, str]]] = None , _lowercase : Optional[Union[str, os.PathLike]] = None , _lowercase : bool = False , ) ->Union[str, Any]:
'''simple docstring'''
a : Tuple = bnb_quantization_config.load_in_abit
a : int = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
" make sure you have the latest version of `bitsandbytes` installed." )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
"make sure you have the latest version of `bitsandbytes` installed." )
a : Tuple = []
# custom device map
if isinstance(_lowercase , _lowercase ) and len(device_map.keys() ) > 1:
a : List[str] = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
a : List[str] = get_keys_to_not_convert(_lowercase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_lowercase )
a : Optional[Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
a : List[str] = []
a : List[Any] = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_lowercase )
# compatibility with peft
a : List[str] = load_in_abit
a : Optional[int] = load_in_abit
a : Union[str, Any] = get_parameter_device(_lowercase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager." )
a : Optional[int] = replace_with_bnb_layers(_lowercase , _lowercase , modules_to_not_convert=_lowercase )
# convert param to the right dtype
a : Optional[int] = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
a : Optional[int] = name.replace(".weight" , "" ).replace(".bias" , "" )
a : Any = getattr(_lowercase , _lowercase , _lowercase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_lowercase ):
param.to(_lowercase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
logger.info(
F"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
"We move the model to cuda." )
return model
elif weights_location is None:
raise RuntimeError(
F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
a : int = replace_with_bnb_layers(
_lowercase , _lowercase , modules_to_not_convert=_lowercase )
a : Tuple = get_quantized_model_device_map(
_lowercase , _lowercase , _lowercase , max_memory=_lowercase , no_split_module_classes=_lowercase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
a : List[str] = True
a : Tuple = any(x in list(device_map.values() ) for x in ["cpu", "disk"] )
load_checkpoint_in_model(
_lowercase , _lowercase , _lowercase , dtype=bnb_quantization_config.torch_dtype , offload_folder=_lowercase , offload_state_dict=_lowercase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_lowercase , device_map=_lowercase , offload_dir=_lowercase )
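# Hedged usage sketch (added; not part of the original file). The function above appears to
# mirror `accelerate.utils.load_and_quantize_model`; the model name and paths below are
# illustrative assumptions, not values taken from this file.
#
# from accelerate import init_empty_weights
# from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
# from transformers import AutoConfig, AutoModelForCausalLM
#
# with init_empty_weights():
#     empty_model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained("facebook/opt-350m"))
# quantized_model = load_and_quantize_model(
#     empty_model,
#     bnb_quantization_config=BnbQuantizationConfig(load_in_8bit=True),
#     weights_location="path/to/checkpoint_folder",  # folder containing the weights
#     device_map="auto",
# )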
def get_quantized_model_device_map( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ) ->List[Any]:
    '''simple docstring'''
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization." )
        logger.info("The device_map was not initialized. " "Setting device_map to `{'':torch.cuda.current_device()}`." )
    if isinstance(device_map , str ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'." )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        special_dtypes.update(
            {
                name: torch.floataa
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
            } )
        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model , low_zero=(device_map == "balanced_low_0") , max_memory=max_memory , **kwargs , )
        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model , **kwargs )
    if isinstance(device_map , dict ):
        # check that we don't have any quantized modules on the CPU
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_abit:
                    raise ValueError(
                        "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" )
        del device_map_without_some_modules
    return device_map
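# Hedged illustration (added; not part of the original file): a custom `device_map` that pins
# the first transformer block to GPU 0 and offloads the rest to CPU. The module names are
# hypothetical and depend on the model architecture.
# custom_device_map = {"transformer.h.0": 0, "": "cpu"}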
def replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ) ->int:
    '''simple docstring'''
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model , has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " This can happen for some architectures, such as gpt2, that use Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on GitHub if you think this is"
            " a bug." )
    return model
def _replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ) ->Optional[int]:
    '''simple docstring'''
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if isinstance(module , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name )
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load the bnb module with empty weights and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_abit:
                    bnb_module = bnb.nn.LinearabitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=False , threshold=bnb_quantization_config.llm_inta_threshold , )
                elif bnb_quantization_config.load_in_abit:
                    bnb_module = bnb.nn.Linearabit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't both be False" )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
        if len(list(module.children() ) ) > 0:
            _ , _has_been_replaced = _replace_with_bnb_layers(
                module , bnb_quantization_config , modules_to_not_convert , current_key_name )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
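# (Added comment) The helper below heuristically collects the modules to keep in full
# precision: tied weights plus the last child module (typically the output head, e.g. an
# ``lm_head``), since quantizing those tends to hurt output quality.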
def get_keys_to_not_convert( model ) ->Union[str, Any]:
    '''simple docstring'''
    with init_empty_weights():
        tied_model = deepcopy(model )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model , "base_model_prefix" ):
        is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , "" )
        filtered_module_names.append(name )
    return filtered_module_names
def _SCREAMING_SNAKE_CASE ( model ) ->Optional[int]:
    '''simple docstring'''
    for m in model.modules():
        if isinstance(m , bnb.nn.Linearabit ):
            return True
    return False
def get_parameter_device( parameter: nn.Module ) ->Union[str, Any]:
    '''simple docstring'''
    return next(parameter.parameters() ).device
def _SCREAMING_SNAKE_CASE ( model , param , param_name , new_dtype , offload_folder , offload_index , fpaa_statistics ) ->Dict:
    '''simple docstring'''
    if fpaa_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split("." )
            for split in splits[:-1]:
                new_module = getattr(module , split )
                if new_module is None:
                    raise ValueError(F"""{module} has no attribute {split}.""" )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
        if hasattr(module._parameters[tensor_name] , "SCB" ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB" ) , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index )
        offload_weight(fpaa_statistics , param_name.replace("weight" , "SCB" ) , offload_folder , index=offload_index )
    set_module_tensor_to_device(model , param_name , "meta" , dtype=new_dtype , value=torch.empty(*param.size() ) )
| 105 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
_a = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_2_8,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 5_0,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 1_0,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 1_0,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
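# (Added note) Each entry above overrides a common `PretrainedConfig` attribute with a
# non-default value; the test class further below relies on this to check that
# `PretrainedConfig` exposes exactly these keys plus a few internal ones.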
@is_staging_test
class A_ ( unittest.TestCase ):
@classmethod
def UpperCAmelCase ( cls : Dict ) -> List[str]:
        cls._token = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def UpperCAmelCase ( cls : str ) -> List[Any]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCAmelCase ( self : int ) -> Optional[int]:
        config = BertConfig(
            vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('test-config' , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(F'''{USER}/test-config''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir , repo_id='test-config' , push_to_hub=True , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(F'''{USER}/test-config''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
def UpperCAmelCase ( self : int ) -> Dict:
        config = BertConfig(
            vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained('valid_org/test-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id='valid_org/test-config-org' , push_to_hub=True , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained('valid_org/test-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
        new_config = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=True )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 4_2 )
class A_ ( unittest.TestCase ):
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
        c = GPTaConfig()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + 'foo'  # str
        c.update_from_string(
            F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
        self.assertEqual(n_embd , c.n_embd , 'mismatch for key: n_embd' )
        self.assertEqual(resid_pdrop , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
        self.assertEqual(scale_attn_weights , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
        self.assertEqual(summary_type , c.summary_type , 'mismatch for key: summary_type' )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config , key )]
        if len(keys_with_defaults ) > 0:
            raise ValueError(
                'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs` pick another value for them:'
                F''' {', '.join(keys_with_defaults )}.''' )
def UpperCAmelCase ( self : int ) -> Optional[Any]:
with self.assertRaises(UpperCAmelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
            config = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
        self.assertIsNotNone(config )
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
# A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
__lowerCAmelCase: Tuple = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request' , return_value=response_mock ) as mock_head:
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
# This test is for deprecated behavior and can be removed in v5
__lowerCAmelCase: Tuple = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCAmelCase ( self : Dict ) -> str:
__lowerCAmelCase: Optional[Any] = AutoConfig.from_pretrained('bert-base-cased' )
__lowerCAmelCase: Optional[Any] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(UpperCAmelCase )
__lowerCAmelCase: Tuple = 2
json.dump(configuration.to_dict() , open(os.path.join(UpperCAmelCase , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__lowerCAmelCase: Dict = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__lowerCAmelCase: Dict = ['config.42.0.0.json']
__lowerCAmelCase: Optional[int] = 7_6_8
configuration.save_pretrained(UpperCAmelCase )
shutil.move(os.path.join(UpperCAmelCase , 'config.4.0.0.json' ) , os.path.join(UpperCAmelCase , 'config.42.0.0.json' ) )
__lowerCAmelCase: int = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = 'hf-internal-testing/test-two-configs'
        import transformers as new_transformers
        new_transformers.configuration_utils.__version__ = 'v4.0.0'
        new_configuration , kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo , return_unused_kwargs=True )
        self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs , {} )
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers
        old_transformers.configuration_utils.__version__ = 'v3.0.0'
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo )
        self.assertEqual(old_configuration.hidden_size , 7_6_8 )
| 322 | 0 |
def SCREAMING_SNAKE_CASE ( matrix: list[list[int | float]] ) -> int:
    """Compute the rank of ``matrix`` by Gaussian elimination (modifies it in place)."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    row = 0
    while row < rank:
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
            row += 1
        else:
            # Find a non-zero element below to swap rows with
            swapped = False
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    swapped = True
                    break
            if not swapped:
                # No pivot in this column: drop it by moving the last usable column in
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Stay on the same row so the new pivot is re-checked
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
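    # Added sanity check (hypothetical example, verified by hand): the second row is twice
    # the first, so this 3x3 matrix has rank 2.
    print(SCREAMING_SNAKE_CASE([[1, 2, 3], [2, 4, 6], [1, 0, 1]]))  # -> 2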
| 369 |
def SCREAMING_SNAKE_CASE ( density: float , bulk_modulus: float ) -> float:
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
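    # Added example (assumed fluid properties, not from the original file): water with a
    # density of ~998 kg/m^3 and a bulk modulus of ~2.15e9 Pa gives roughly 1468 m/s.
    print(SCREAMING_SNAKE_CASE(998, 2.15e9))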
| 177 | 0 |
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix , row: int , column: int , n: int ) -> bool:
"""simple docstring"""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix ) -> tuple[int, int] | None:
"""simple docstring"""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix ) -> Matrix | None:
    """simple docstring"""
    if location := find_empty_location(grid ):
        row , column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix ) -> None:
"""simple docstring"""
for row in grid:
for cell in row:
print(_UpperCAmelCase , end=" " )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 31 |
def __SCREAMING_SNAKE_CASE ( power: int = 1000 ) -> int:
    '''simple docstring'''
    n = 2**power
    r = 0
    while n:
        r , n = r + n % 10, n // 10
    return r
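# Added sanity check (hypothetical example, verified by hand): 2**15 = 32768 and
# 3 + 2 + 7 + 6 + 8 = 26.
assert __SCREAMING_SNAKE_CASE(15) == 26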
if __name__ == "__main__":
    print(__SCREAMING_SNAKE_CASE(int(input().strip())))
| 133 | 0 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
'''simple docstring'''
def __init__(self , a_ , a_=2 , a_=3 , a_=4 , a_=2 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=36 , a_=3 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=6 , a_=6 , a_=3 , a_=4 , a_=None , a_=10_00 , ):
'''simple docstring'''
__snake_case : int = parent
__snake_case : Union[str, Any] = batch_size
__snake_case : Optional[Any] = num_channels
__snake_case : str = image_size
__snake_case : Tuple = patch_size
__snake_case : str = text_seq_length
__snake_case : List[Any] = is_training
__snake_case : List[str] = use_input_mask
__snake_case : List[str] = use_token_type_ids
__snake_case : List[str] = use_labels
__snake_case : Any = vocab_size
__snake_case : List[str] = hidden_size
__snake_case : int = num_hidden_layers
__snake_case : str = num_attention_heads
__snake_case : Union[str, Any] = intermediate_size
__snake_case : str = hidden_act
__snake_case : List[Any] = hidden_dropout_prob
__snake_case : Tuple = attention_probs_dropout_prob
__snake_case : str = max_position_embeddings
__snake_case : Any = type_vocab_size
__snake_case : Optional[int] = type_sequence_label_size
__snake_case : Optional[int] = initializer_range
__snake_case : List[Any] = coordinate_size
__snake_case : List[str] = shape_size
__snake_case : Optional[int] = num_labels
__snake_case : Dict = num_choices
__snake_case : Any = scope
__snake_case : List[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__snake_case : int = text_seq_length
__snake_case : Optional[Any] = (image_size // patch_size) ** 2 + 1
__snake_case : List[str] = self.text_seq_length + self.image_seq_length
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__snake_case : Dict = bbox[i, j, 3]
__snake_case : List[str] = bbox[i, j, 1]
__snake_case : Optional[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__snake_case : Any = bbox[i, j, 2]
__snake_case : Optional[Any] = bbox[i, j, 0]
__snake_case : List[str] = t
__snake_case : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : int = None
if self.use_input_mask:
__snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
__snake_case : List[Any] = None
if self.use_token_type_ids:
__snake_case : Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__snake_case : Union[str, Any] = None
__snake_case : List[Any] = None
if self.use_labels:
__snake_case : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__snake_case : List[str] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Tuple = LayoutLMvaModel(config=a_ )
model.to(a_ )
model.eval()
# text + image
__snake_case : Dict = model(a_ , pixel_values=a_ )
__snake_case : Union[str, Any] = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ )
__snake_case : Union[str, Any] = model(a_ , bbox=a_ , pixel_values=a_ , token_type_ids=a_ )
__snake_case : str = model(a_ , bbox=a_ , pixel_values=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__snake_case : List[str] = model(a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__snake_case : Any = model(pixel_values=a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : int = self.num_labels
__snake_case : Optional[int] = LayoutLMvaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
__snake_case : List[str] = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Tuple = self.num_labels
__snake_case : List[Any] = LayoutLMvaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Any = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Optional[int] = LayoutLMvaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''pixel_values''': pixel_values,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =(
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase__ =(
{'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
return True
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        self.model_tester = LayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_=False ):
'''simple docstring'''
__snake_case : int = copy.deepcopy(a_ )
if model_class in get_values(a_ ):
__snake_case : Any = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(a_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(a_ ):
__snake_case : Tuple = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in get_values(a_ ):
__snake_case : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
__snake_case : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
__snake_case : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
__snake_case : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=a_ , )
return inputs_dict
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ) ->List[Any]:
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class LayoutLMvaModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        model = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='''pt''' ).pixel_values.to(torch_device )
        input_ids = torch.tensor([[1, 2]] )
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device ) , bbox=bbox.to(torch_device ) , pixel_values=pixel_values.to(torch_device ) , )
        # verify the logits
        expected_shape = torch.Size((1, 1_99, 7_68) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ) )
| 370 |
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components( ) ->Optional[int]:
    """simple docstring"""
    model = torch.nn.Linear(2 , 4 )
    optimizer = torch.optim.AdamW(model.parameters() , lr=1.0 )
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
    return model, optimizer, scheduler, train_dl, valid_dl
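# (Added comment) ``get_signature`` below reduces the model weights to a single scalar; the
# save/load tests use it as a cheap fingerprint to tell whether parameters were restored.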
def get_signature( model ) ->Optional[Any]:
    """simple docstring"""
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def load_random_weights( model ) ->Tuple:
    """simple docstring"""
    state = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
    model.load_state_dict(state )
class AcceleratorTester( AccelerateTestCase ):
'''simple docstring'''
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError ):
            _ = Accelerator(cpu=True )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4
        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        accelerator = Accelerator()
        model , optimizer , scheduler , train_dl , valid_dl = create_components()
        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*a_ , **a_ ):
pass
with patch('''torch.cuda.set_device''' , a_ ), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ):
__snake_case : List[Any] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        accelerator = Accelerator()
        model , optimizer , scheduler , train_dl , valid_dl = create_components()
        accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
        model_signature = get_signature(model )
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname )
            # make sure random weights don't match
            load_random_weights(model )
            self.assertTrue(abs(model_signature - get_signature(model ) ) > 1E-3 )
            # make sure loaded weights match
            accelerator.load_state(tmpdirname )
            self.assertTrue(abs(model_signature - get_signature(model ) ) < 1E-3 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
__snake_case : List[Any] = get_signature(a_ )
# saving hook
def save_config(a_ , a_ , a_ ):
__snake_case : Optional[Any] = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(a_ , '''data.json''' ) , '''w''' ) as f:
json.dump(a_ , a_ )
# loading hook
def load_config(a_ , a_ ):
with open(os.path.join(a_ , '''data.json''' ) , '''r''' ) as f:
__snake_case : Any = json.load(a_ )
__snake_case : List[str] = config['''class_name''']
__snake_case : str = accelerator.register_save_state_pre_hook(a_ )
__snake_case : Union[str, Any] = accelerator.register_load_state_pre_hook(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case : Any = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
        # model.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks removed
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case : Union[str, Any] = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
        # model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        accelerator = Accelerator()
        model , optimizer , scheduler , train_dl , valid_dl = create_components()
        dummy_obj = None
        # This should work
        dummy_obj , model , optimizer , scheduler , train_dl , valid_dl = accelerator.prepare(
            dummy_obj , model , optimizer , scheduler , train_dl , valid_dl )
self.assertTrue(dummy_obj is None )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        accelerator = Accelerator()
        model , optimizer , scheduler , train_dl , valid_dl = create_components()
        dummy_obj = [1, 2, 3]
        # This should work
        dummy_obj , model , optimizer , scheduler , train_dl , valid_dl = accelerator.prepare(
            dummy_obj , model , optimizer , scheduler , train_dl , valid_dl )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : Dict = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map={'''''': 0} , )
__snake_case : Optional[Any] = Accelerator()
# This should work
__snake_case : Any = accelerator.prepare(a_ )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : Any = Accelerator()
with init_empty_weights():
__snake_case : List[str] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__snake_case : Union[str, Any] = infer_auto_device_map(a_ )
__snake_case : str = '''cpu'''
__snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , device_map=a_ , load_in_abit=a_ , llm_inta_enable_fpaa_cpu_offload=a_ )
# This should not work and get value error
with self.assertRaises(a_ ):
__snake_case : Dict = accelerator.prepare(a_ )
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : str = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
__snake_case : Any = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__snake_case : List[Any] = infer_auto_device_map(a_ )
__snake_case : Dict = 1
__snake_case : str = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , )
__snake_case : Any = Accelerator()
# This should not work and get value error
with self.assertRaises(a_ ):
__snake_case : Tuple = accelerator.prepare(a_ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
with init_empty_weights():
__snake_case : Dict = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
__snake_case : Tuple = infer_auto_device_map(a_ )
__snake_case : Tuple = 1
__snake_case : List[Any] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , )
__snake_case : Tuple = Accelerator()
# This should work
__snake_case : Dict = accelerator.prepare(a_ )
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = torch.nn.Linear(10 , 10 )
__snake_case : List[str] = torch.optim.SGD(model.parameters() , lr=0.01 )
__snake_case : Optional[Any] = Accelerator(cpu=a_ )
__snake_case : str = accelerator.prepare(a_ )
| 24 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A : str = logging.get_logger(__name__)
A : Union[str, Any] = '▁'
A : Optional[int] = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
A : str = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
A : Any = {
'facebook/s2t-small-librispeech-asr': 1_0_2_4,
}
A : Dict = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
A : Union[str, Any] = {'mustc': MUSTC_LANGS}
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = MAX_MODEL_INPUT_SIZES
A__ = ['''input_ids''', '''attention_mask''']
A__ = []
def __init__(self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int]="<s>" , _UpperCAmelCase : Optional[int]="</s>" , _UpperCAmelCase : Optional[Any]="<pad>" , _UpperCAmelCase : int="<unk>" , _UpperCAmelCase : Any=False , _UpperCAmelCase : Dict=False , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : List[Any] , ) -> None:
"""simple docstring"""
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , do_upper_case=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , lang_codes=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
lowercase__ = do_upper_case
lowercase__ = do_lower_case
lowercase__ = load_json(_UpperCAmelCase )
lowercase__ = {v: k for k, v in self.encoder.items()}
lowercase__ = spm_file
lowercase__ = load_spm(_UpperCAmelCase , self.sp_model_kwargs )
if lang_codes is not None:
lowercase__ = lang_codes
lowercase__ = LANGUAGES[lang_codes]
lowercase__ = [f'''<lang:{lang}>''' for lang in self.langs]
lowercase__ = {lang: self.sp_model.PieceToId(f'''<lang:{lang}>''' ) for lang in self.langs}
lowercase__ = self.lang_tokens
lowercase__ = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
lowercase__ = {}
@property
def lowerCamelCase__ (self : str ) -> int:
"""simple docstring"""
return len(self.encoder )
@property
def lowerCamelCase__ (self : List[Any] ) -> str:
"""simple docstring"""
return self._tgt_lang
@tgt_lang.setter
def lowerCamelCase__ (self : Any , _UpperCAmelCase : int ) -> None:
"""simple docstring"""
        self._tgt_lang = new_tgt_lang
self.set_tgt_lang_special_tokens(_UpperCAmelCase )
def lowerCamelCase__ (self : str , _UpperCAmelCase : str ) -> None:
"""simple docstring"""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
def lowerCamelCase__ (self : Tuple , _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def lowerCamelCase__ (self : Tuple , _UpperCAmelCase : int ) -> Dict:
"""simple docstring"""
return self.encoder.get(_UpperCAmelCase , self.encoder[self.unk_token] )
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : int ) -> str:
"""simple docstring"""
return self.decoder.get(_UpperCAmelCase , self.unk_token )
    def lowerCamelCase__ (self , tokens: List[str] ) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        decoded = self.sp_model.decode(current_sub_tokens )
        out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowerCamelCase__ (self : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
lowercase__ = [1] * len(self.prefix_tokens )
lowercase__ = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
def lowerCamelCase__ (self : List[str] ) -> Dict:
"""simple docstring"""
lowercase__ = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self : List[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__(self : Optional[Any] , _UpperCAmelCase : Dict ) -> None:
"""simple docstring"""
lowercase__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowercase__ = {}
lowercase__ = load_spm(self.spm_file , self.sp_model_kwargs )
def lowerCamelCase__ (self : Any , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
lowercase__ = Path(_UpperCAmelCase )
assert save_dir.is_dir(), f'''{save_directory} should be a directory'''
lowercase__ = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
lowercase__ = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , _UpperCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _UpperCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(_UpperCAmelCase , """wb""" ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (str(_UpperCAmelCase ), str(_UpperCAmelCase ))
def UpperCamelCase ( __magic_name__ : str , __magic_name__ : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
"""simple docstring"""
lowercase__ = sentencepiece.SentencePieceProcessor(**__magic_name__ )
spm.Load(str(__magic_name__ ) )
return spm
def UpperCamelCase ( __magic_name__ : str ) -> Union[Dict, List]:
"""simple docstring"""
with open(__magic_name__ , """r""" ) as f:
return json.load(__magic_name__ )
def UpperCamelCase ( __magic_name__ : List[str] , __magic_name__ : str ) -> None:
"""simple docstring"""
with open(__magic_name__ , """w""" ) as f:
json.dump(__magic_name__ , __magic_name__ , indent=2 )
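# Hedged usage sketch (added; not part of the original file). This file appears to implement
# the Speech2Text tokenizer from `transformers`; the checkpoint below is the one referenced
# in the vocab maps above.
#
# from transformers import Speech2TextTokenizer
# tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
# ids = tokenizer("hello world").input_ids          # encode text to ids (eos appended)
# text = tokenizer.decode(ids, skip_special_tokens=True)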
| 305 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester( unittest.TestCase ):
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)
@slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
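# Note (illustrative, not part of the original file): the @slow test above is
# skipped by default; in a transformers checkout it is typically enabled with
# something like `RUN_SLOW=1 python -m pytest tests/models/bert`.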
| 305 | 1 |
"""simple docstring"""
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = size
__lowerCAmelCase : str = [0] * size
__lowerCAmelCase : Any = [0] * size
@staticmethod
def __lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
return index | (index + 1)
@staticmethod
def __lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
return (index & (index + 1)) - 1
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = value
while index < self.size:
__lowerCAmelCase : Dict = self.get_prev(_SCREAMING_SNAKE_CASE ) + 1
if current_left_border == index:
__lowerCAmelCase : Any = value
else:
__lowerCAmelCase : Any = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = self.get_next(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
right -= 1 # Because of right is exclusive
__lowerCAmelCase : Optional[int] = 0
while left <= right:
__lowerCAmelCase : Optional[int] = self.get_prev(_SCREAMING_SNAKE_CASE )
if left <= current_left:
__lowerCAmelCase : Optional[Any] = max(_SCREAMING_SNAKE_CASE , self.tree[right] )
__lowerCAmelCase : Optional[Any] = current_left
else:
__lowerCAmelCase : List[str] = max(_SCREAMING_SNAKE_CASE , self.arr[right] )
right -= 1
return result
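# Minimal usage sketch (illustrative, not part of the original file):
#
#     fw = MaxFenwickTree(5)
#     fw.update(2, 10)
#     fw.update(4, 7)
#     fw.query(0, 5)  # -> 10, maximum over indices [0, 5)
#     fw.query(3, 5)  # -> 7, maximum over indices [3, 5)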
if __name__ == "__main__":
import doctest
doctest.testmod() | 182 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU')) == 0, 'Cannot do xla on CPU.')
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, 'inf_time.csv'), inference_memory_csv_file=os.path.join(tmp_dir, 'inf_mem.csv'), env_info_csv_file=os.path.join(tmp_dir, 'env.csv'), multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, 'inf_time.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, 'inf_mem.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, 'env.csv')).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, 'sequential'))
            self.assertTrue(hasattr(summary, 'cumulative'))
            self.assertTrue(hasattr(summary, 'current'))
            self.assertTrue(hasattr(summary, 'total'))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, 'log.txt'), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, 'log.txt')).exists())
| 182 | 1 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
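# Quick illustration of the API under test (a sketch, not part of the original file):
#
#     from datasets import Dataset
#     ds = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
#     ds.column_names  # -> ['col_1', 'col_2']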
| 195 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # layer norm scale
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
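# What `rename_key` does, by example (a sketch, not part of the original file):
#
#     rename_key("down_blocks.0.resnets.1.conv1.weight")
#     # -> "down_blocks_0.resnets_1.conv1.weight"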
| 195 | 1 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(self, vqvae: AutoencoderKL, unet: UNet2DConditionModel, mel: Mel, scheduler: Union[DDIMScheduler, DDPMScheduler]):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self, batch_size: int = 1, audio_file: str = None, raw_audio: np.ndarray = None, slice: int = 0, start_step: int = 0, steps: int = None, generator: torch.Generator = None, mask_start_secs: float = 0, mask_end_secs: float = 0, step_generator: torch.Generator = None, eta: float = 0, noise: torch.Tensor = None, encoding: torch.Tensor = None, return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        # Only works with DDIM as this method is deterministic
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
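# Illustrative use of `slerp` above (a sketch, not part of the original file):
# spherical interpolation between two noise tensors, e.g. to morph between two
# generations.
#
#     import torch
#     x0, x1 = torch.randn(2**14), torch.randn(2**14)
#     midpoint = AudioDiffusionPipeline.slerp(x0, x1, 0.5)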
| 243 |
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self, bos_token=chr(CLS), eos_token=chr(SEP), sep_token=chr(SEP), cls_token=chr(CLS), pad_token=chr(PAD), mask_token=chr(MASK), add_prefix_space=False, model_max_length=2048, **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, model_max_length=model_max_length, **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        return ()
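# Behaviour sketch (illustrative, not part of the original file): CANINE
# tokenizes at the character level, so ids are Unicode codepoints plus the
# special codepoints defined above.
#
#     tok = CanineTokenizer()
#     tok("hi")["input_ids"]  # -> [57344, 104, 105, 57345]  (CLS, 'h', 'i', SEP)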
| 243 | 1 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)

    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")
    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)

    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError, "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier"
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")
    def test_model_name_edge_cases_in_mappings(self):
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)

    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)
    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"

        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")
    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)

    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")
    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
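# Minimal registration sketch distilled from the tests above (illustrative,
# not part of the original file):
#
#     AutoConfig.register("custom", CustomConfig)
#     AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
#     tokenizer = AutoTokenizer.from_pretrained(path_to_dir_with_custom_config)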
| 287 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
    """umberto-commoncrawl-cased-v1""": (
        """https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
    ),
    """umberto-wikipedia-uncased-v1""": (
        """https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
    ),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
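# Illustrative (a sketch, not part of the original file): the ONNX config only
# declares dynamic axes for the encoder inputs. The task name below is an
# assumption about a valid task key.
#
#     onnx_config = CamembertOnnxConfig(CamembertConfig(), task="sequence-classification")
#     list(onnx_config.inputs)  # -> ['input_ids', 'attention_mask']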
| 287 | 1 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split("." )[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split("." )[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]

        if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
            new_checkpoint[f"""encoder.down_blocks.{i}.downsamplers.0.conv.weight"""] = vae_state_dict.pop(
                f"""encoder.down.{i}.downsample.conv.weight""" )
            new_checkpoint[f"""encoder.down_blocks.{i}.downsamplers.0.conv.bias"""] = vae_state_dict.pop(
                f"""encoder.down.{i}.downsample.conv.bias""" )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"""down.{i}.block""", "new": f"""down_blocks.{i}.resnets"""}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
        ]

        if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
            new_checkpoint[f"""decoder.up_blocks.{i}.upsamplers.0.conv.weight"""] = vae_state_dict[
                f"""decoder.up.{block_id}.upsample.conv.weight"""
            ]
            new_checkpoint[f"""decoder.up_blocks.{i}.upsamplers.0.conv.bias"""] = vae_state_dict[
                f"""decoder.up.{block_id}.upsample.conv.bias"""
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"""up.{block_id}.block""", "new": f"""up_blocks.{i}.resnets"""}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path, output_path):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
lowerCamelCase = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
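# Example invocation (illustrative; the script filename is an assumption):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae-diffusers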
| 359 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 241 | 0 |
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
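
# --- Usage sketch (illustrative, not part of the original __init__) ---
# The schedulers exported above share a common config interface, so user code
# can swap one for another on an existing pipeline; `from_config` is the
# standard diffusers API and `pipe` is assumed to be any loaded pipeline.
def _swap_scheduler_example(pipe):
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    return pipe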
| 66 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        # Extra 5-way classification head over the pooled output.
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
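
# --- Worked check (illustrative, not part of the original script) ---
# With logits of all zeros over 2 classes the softmax is uniform, so the
# per-position cross-entropy is log(2) ~ 0.6931 whatever the label is.
def _cross_entropy_sanity_check():
    logits = jnp.zeros((1, 1, 2))  # (batch, seq_len, num_classes)
    labels = jnp.array([[0]])
    one_hot = (labels[..., None] == jnp.arange(2)[None]).astype("f4")
    return -jnp.sum(one_hot * jax.nn.log_softmax(logits, axis=-1), axis=-1)  # ~0.6931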
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
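
# --- Worked example (illustrative, not part of the original script) ---
# _fetch_inputs pads one sequence up to max_length with pad_id and builds the
# matching attention mask; with pad_id=0 and max_length=6:
#   [5, 7, 9] -> ids [5, 7, 9, 0, 0, 0], mask [1, 1, 1, 0, 0, 0]
def _collator_padding_example():
    collator = DataCollator(pad_id=0, max_length=6)
    return collator._fetch_inputs([5, 7, 9])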
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
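
# --- Illustrative sketch (not part of the original script) ---
# Inside the pmapped steps, jax.lax.pmean replaces each device's value with the
# mean across devices, so every replica logs the same loss and applies identical
# averaged gradients. A toy check on however many devices are available:
def _pmean_example():
    n = jax.device_count()
    xs = jnp.arange(float(n))  # one scalar per device
    return jax.pmap(lambda x: jax.lax.pmean(x, axis_name="i"), axis_name="i")(xs)
    # every entry equals mean(0 .. n-1)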
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        # Decay everything except biases and LayerNorm scales.
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)

    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
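
# --- Usage sketch (illustrative, not part of the original script) ---
# build_tx returns the AdamW gradient transformation together with the joined
# warmup/decay schedule; the schedule is itself callable, so its shape is easy
# to probe. The step counts below are arbitrary example numbers.
def _tx_example():
    tx, lr = build_tx(lr=3e-5, init_lr=0.0, warmup_steps=100, num_train_steps=1000, weight_decay=0.0095)
    return lr(0), lr(100), lr(1000)  # ~0.0 (start), ~3e-5 (end of warmup), ~1e-7 (end of decay)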
| 290 | 0 |
'''simple docstring'''

from ..utils import DummyObject, requires_backends


class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
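
# --- Behavior sketch (illustrative, not part of the original file) ---
# When the "speech" backend (torchaudio) is missing, instantiating one of these
# dummy classes raises an ImportError that names the backend, instead of an
# opaque AttributeError at import time.
def _dummy_object_example():
    try:
        ASTFeatureExtractor()
    except ImportError as err:
        return str(err)  # explains that the "speech" backend must be installed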
| 13 |
'''simple docstring'''


def is_pentagonal(n: int) -> bool:
    """Return True if n is pentagonal, i.e. n = m(3m - 1) / 2 for some integer m."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Return the smallest difference of a pentagonal pair whose sum and
    difference are both pentagonal (Project Euler problem 44)."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
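
# --- Worked check (illustrative, not part of the original solution) ---
# P_n = n(3n - 1)/2 gives 1, 5, 12, 22, 35, ...; inverting P_n = x yields
# n = (1 + sqrt(1 + 24x)) / 6, and is_pentagonal tests that this n is an integer.
# For x = 22: sqrt(529) = 23 and (1 + 23) / 6 = 4, so 22 is pentagonal; 23 is not.
def _pentagonal_sanity_check():
    return [is_pentagonal(x) for x in (1, 5, 12, 22, 35, 23)]  # [True]*5 + [False]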
| 13 | 1 |