"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
_a = 'mra'
def __init__( self : Optional[int] , lowerCAmelCase : List[str]=5_0265 , lowerCAmelCase : int=768 , lowerCAmelCase : List[Any]=12 , lowerCAmelCase : Any=12 , lowerCAmelCase : List[Any]=3072 , lowerCAmelCase : List[Any]="gelu" , lowerCAmelCase : int=0.1 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : List[Any]=512 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : Tuple=0.02 , lowerCAmelCase : Dict=1e-5 , lowerCAmelCase : Union[str, Any]="absolute" , lowerCAmelCase : Dict=4 , lowerCAmelCase : List[str]="full" , lowerCAmelCase : Union[str, Any]=0 , lowerCAmelCase : List[str]=0 , lowerCAmelCase : Optional[int]=1 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : str=2 , **lowerCAmelCase : Optional[int] , ):
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
lowerCAmelCase = vocab_size
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = initializer_range
lowerCAmelCase = type_vocab_size
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = position_embedding_type
lowerCAmelCase = block_per_row
lowerCAmelCase = approx_mode
lowerCAmelCase = initial_prior_first_n_blocks
lowerCAmelCase = initial_prior_diagonal_n_blocks
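

# A minimal usage sketch (assumes the transformers package layout above; guarded so that
# importing the module stays side-effect free):
if __name__ == "__main__":
    config = MraConfig(block_per_row=2)
    print(config.hidden_size)  # 768 by default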
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter Sunday for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(days=int(days_to_add + days_from_phm_to_sunday))


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
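
    # Quick sanity check against two well-known Easter Sundays:
    assert gauss_easter(2000) == datetime(2000, 4, 23)  # April 23, 2000
    assert gauss_easter(2023) == datetime(2023, 4, 9)  # April 9, 2023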
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 100, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, audio_length_in_s: Optional[float] = None, return_dict: bool = True) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = ((audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio)
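

# A sampling sketch (hedged: assumes `diffusers` is installed; the checkpoint name below
# is illustrative and not guaranteed to exist on the Hub):
#
#     from diffusers import DanceDiffusionPipeline
#     pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#     audios = pipe(audio_length_in_s=4.0, num_inference_steps=100).audios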
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        r"""Return a match for any `open(...)` call in the file that sets no encoding or binary mode."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        r"""Return a match for any `print(...)` call outside comments and docstrings."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()', re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    """A wrapper around a learning rate scheduler that only steps when the optimizer(s) actually stepped."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
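

# Usage sketch (hedged: in practice Accelerate returns this wrapper from
# `accelerator.prepare(...)`, and `step()` consults the shared GradientState, so the
# manual wiring below is only illustrative):
#
#     optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
#     lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
#     scheduler = AcceleratedScheduler(lr_scheduler, optimizer, step_with_optimizer=True)
#     scheduler.step()  # advances only when the wrapped optimizer actually stepped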
import argparse
import random

import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler

from transformers import GPT2LMHeadModel


def generate_n_pairs(context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl"):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(context_len, data_file, number=size_objective_set, min_len=1026, trim=trim)
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()


def training_secondary_learner(secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt"):
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner


def finetune(model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt"):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)

            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model


def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain data files for WikiText.")
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models")
    parser.add_argument("--data_file", type=str, default=None, help=("A jbl file containing tokenized data which can be split as objective dataset, " "train_dataset and test_dataset."))
    parser.add_argument("--igf_data_file", type=str, default=None, help="A jbl file containing the context and information gain pairs to train secondary learner.")
    parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the final fine-tuned model is stored.")
    parser.add_argument("--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name")
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument("--context_len", default=32, type=int, help=("The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded."))
    parser.add_argument("--size_objective_set", default=100, type=int, help="number of articles that are long enough to be used as our objective set")
    parser.add_argument("--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument("--secondary_learner_batch_size", default=128, type=int, help="batch size of training data for secondary learner")
    parser.add_argument("--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) ")
    parser.add_argument("--eval_interval", default=10, type=int, help=("decay the selectivity of our secondary learner filter from" "1 standard deviation above average to 1 below average after 10 batches"))
    parser.add_argument("--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data")
    parser.add_argument("--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set")
    parser.add_argument("--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner")
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument("--threshold", default=1.0, type=float, help=("The threshold value used by secondary learner to filter the train_data and allow only" " informative data as input to the model"))
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument("--recopy_model", default=recopy_gpt2, type=str, help="Reset the model to the original pretrained GPT-2 weights after each iteration")

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )


if __name__ == "__main__":
    main()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel


@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates)
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]


def prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2)
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
from __future__ import annotations


def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law (V = I * R): given two known values and one zero, compute the third."""
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
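
    # Worked examples (I = V / R): 10 V across 5 Ω gives 2 A, and 2 A through 5 Ω gives 10 V.
    assert ohms_law(voltage=10, current=0, resistance=5) == {"current": 2.0}
    assert ohms_law(voltage=0, current=2, resistance=5) == {"voltage": 10.0}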
def factorial(num: int) -> int:
    """Return num! (the factorial of num)."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Return the sum of the decimal digits of number."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
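
    # Sanity check: the digits of 100! sum to 648 (the well-known Project Euler 20 result).
    assert solution(100) == 648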
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def make_batched(videos):
    """Coerce a single image, a list of frames, or a batch of videos into a list of videos (lists of frames)."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format)
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
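

# Usage sketch (hedged: assumes the processor API above and a 16-frame dummy video):
#
#     import numpy as np
#     processor = VideoMAEImageProcessor()
#     video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(16)]
#     inputs = processor.preprocess(video, return_tensors="np")
#     print(inputs["pixel_values"].shape)  # (1, 16, 3, 224, 224) after resize + center crop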
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
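
    # Sketch of what the lazy pattern buys (inferred from this file's structure): at import
    # time only `_import_structure` is built; a submodule such as `modeling_roberta` is
    # loaded on first attribute access, e.g.:
    #
    #     import transformers
    #     model_cls = transformers.RobertaModel  # triggers the lazy import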
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the scheduler's step function, carrying the previous sample and its mean."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 2000, snr: float = 0.15, sigma_min: float = 0.01, sigma_max: float = 1348.0, sampling_eps: float = 1e-5, correct_steps: int = 1):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError("`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        timestep = timestep * torch.ones(sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype)
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(self, model_output: torch.FloatTensor, sample: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError("`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
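

# Predictor-corrector usage sketch (hedged: mirrors how the score-SDE pipeline drives this
# scheduler; `model` and `sample` are placeholders you must supply):
#
#     scheduler = ScoreSdeVeScheduler()
#     scheduler.set_timesteps(num_inference_steps=100)
#     scheduler.set_sigmas(100)
#     for t in scheduler.timesteps:
#         for _ in range(scheduler.config.correct_steps):
#             score = model(sample, t).sample
#             sample = scheduler.step_correct(score, sample).prev_sample
#         score = model(sample, t).sample
#         sample = scheduler.step_pred(score, t, sample).prev_sample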
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _A ( unittest.TestCase ):
'''simple docstring'''
def snake_case_ ( self ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case : Optional[int] = UNetaDModel(
sample_size=(32, 64) ,in_channels=1 ,out_channels=1 ,layers_per_block=2 ,block_out_channels=(128, 128) ,down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") ,up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") ,)
return model
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case : Optional[Any] = UNetaDConditionModel(
sample_size=(64, 32) ,in_channels=1 ,out_channels=1 ,layers_per_block=2 ,block_out_channels=(128, 128) ,down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") ,up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") ,cross_attention_dim=10 ,)
return model
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case : str = AutoencoderKL(
sample_size=(128, 64) ,in_channels=1 ,out_channels=1 ,latent_channels=1 ,layers_per_block=2 ,block_out_channels=(128, 128) ,down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") ,up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") ,)
snake_case : List[str] = UNetaDModel(
sample_size=(64, 32) ,in_channels=1 ,out_channels=1 ,layers_per_block=2 ,block_out_channels=(128, 128) ,down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") ,up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") ,)
return vqvae, unet
@slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
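How the pixel "fingerprint" assertions above work, in miniature: a PIL image's raw bytes are read back with np.frombuffer and the first few values are compared to a stored slice. A self-contained sketch with a synthetic grayscale image (requires Pillow; the 8x8 ramp image is an assumption for illustration):

import numpy as np
from PIL import Image

image = Image.fromarray(np.arange(64, dtype=np.uint8).reshape(8, 8))  # mode "L"
image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
print(image_slice)  # first ten raw pixel values: [0 1 2 3 4 5 6 7 8 9]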
| 36 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
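The same deferred-import idea can be sketched with plain PEP 562 module __getattr__, without the _LazyModule helper. This is an illustrative miniature that only works inside a package module, not a standalone script:

import importlib

_import_structure = {"configuration_pegasus_x": ["PegasusXConfig"]}

def __getattr__(name):
    # Import the submodule lazily, only when one of its names is first accessed.
    for module_name, names in _import_structure.items():
        if name in names:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")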
| 23 | 0 |
"""Extract a subset of teacher layers into a checkpoint usable for distillation."""
import argparse

import torch

from transformers import GPT2LMHeadModel, RobertaForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head #
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[layer] = state_dict[layer]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
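A quick sanity-check sketch for the layer selection above: every other teacher layer (plus the last) is re-indexed to consecutive student slots, halving the depth from 12 to 6:

for std_idx, teacher_idx in enumerate([0, 2, 4, 7, 9, 11]):
    print(f"teacher layer {teacher_idx} -> student layer {std_idx}")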
| 331 |
import datasets
from .evaluate import evaluate
_CITATION = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {
'id': datasets.Value('string' ),
'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 23 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam-based dataset with flat features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam-based dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
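For context, the Beam machinery the builders above rely on reduces to a pipeline of PTransforms that DirectRunner executes locally. A standalone sketch (requires apache-beam; the two-element dataset is an assumption for illustration):

import apache_beam as beam

with beam.Pipeline(runner="DirectRunner") as pipeline:
    (
        pipeline
        | "Load Examples" >> beam.Create([(0, {"content": "foo"}), (1, {"content": "bar"})])
        | "Print" >> beam.Map(print)
    )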
| 23 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowercase (snake_case__ : str ) -> List[str]:
'''simple docstring'''
if not nums:
return 0
lowerCAmelCase = nums[0]
lowerCAmelCase = 0
for num in nums[1:]:
lowerCAmelCase , lowerCAmelCase = (
max_excluding + num,
max(__lowercase , __lowercase ),
)
return max(__lowercase , __lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
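A short demonstration of the recurrence above (hand-picked values; runs only when the file is executed directly):

if __name__ == "__main__":
    assert maximum_non_adjacent_sum([3, 2, 5, 10, 7]) == 15  # picks 3 + 5 + 7
    assert maximum_non_adjacent_sum([5, 5, 10, 100, 10, 5]) == 110  # picks 5 + 100 + 5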
| 169 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
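Example invocation of the conversion script (all paths here are hypothetical placeholders):

# python convert_albert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./albert_base/model.ckpt-best \
#     --albert_config_file ./albert_base/albert_config.json \
#     --pytorch_dump_path ./albert_base/pytorch_model.bin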
| 23 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=True,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
@slow
    def test_generation_from_short_input(self):
        generate_kwargs = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        tokenizer_kwargs = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")
        generated_ids = model.generate(**model_inputs, **generate_kwargs)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_ids, **tokenizer_kwargs)
        assert generated_txt[0].strip() == tgt_text
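What shift_tokens_right does, as a hand-rolled miniature rather than the library function: roll the sequence right by one and write the decoder start token into position 0 (pad_token_id=1 and start token 2 are the values used in the tests above):

import numpy as np

input_ids = np.array([[71, 82, 18, 2, 1, 1]])  # ends with eos (2), then padding (1)
shifted = np.roll(input_ids, 1, axis=-1)
shifted[:, 0] = 2                              # decoder_start_token_id
print(shifted)                                 # [[ 2 71 82 18  2  1]] — one pad token fewer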
| 597 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))
        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask
            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)
            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )
        # start training
        trainer.train()
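Why the labels above replace padding with -100: PyTorch's cross-entropy loss skips that index, so padded positions contribute nothing to training. A self-contained sketch with assumed shapes:

import torch

logits = torch.randn(1, 4, 10)               # (batch, sequence, vocab)
labels = torch.tensor([[3, 7, -100, -100]])  # the last two positions are padding
loss = torch.nn.functional.cross_entropy(
    logits.view(-1, 10), labels.view(-1), ignore_index=-100
)
print(loss)  # computed over the two real tokens only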
| 23 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        self.add_bos_token = kwargs.pop("add_bos_token", False)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
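add_prefix_space in practice: GPT-2's byte-level BPE is space-aware, so "world" and " world" map to different token sequences. A short sketch (downloads the public gpt2 tokenizer on first use; the exact token ids are not asserted here):

from transformers import GPT2TokenizerFast

tok = GPT2TokenizerFast.from_pretrained("gpt2")
tok_prefix = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
# The two encodings differ because the prefixed variant tokenizes " world" instead of "world".
print(tok("world")["input_ids"] == tok_prefix("world")["input_ids"])  # False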
| 197 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs)
    )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
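What accelerator.accumulate replaces, written out in plain PyTorch: scale each micro-batch loss by 1/K and only step the optimizer every K batches. A minimal sketch with a hypothetical model and random data:

import torch

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
K = 4  # gradient_accumulation_steps: effective batch size is K * micro-batch size

for step in range(16):
    x, y = torch.randn(8, 10), torch.randint(0, 2, (8,))
    loss = torch.nn.functional.cross_entropy(model(x), y) / K
    loss.backward()                 # gradients accumulate in the .grad buffers
    if (step + 1) % K == 0:
        optimizer.step()
        optimizer.zero_grad()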
| 23 | 0 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
    load_gpt2,
    recopy_gpt2,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context[0, :] = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain data files for WikiText."
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file", type=str, default=None, help="A jbl file containing the context and information gain pairs to train secondary learner."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the final fine-tuned model is stored."
    )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name"
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set", default=100, type=int, help="number of articles that are long enough to be used as our objective set"
    )
    parser.add_argument("--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int, help="batch size of training data for secondary learner"
    )
    parser.add_argument("--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) ")
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from "
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument("--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data")
    parser.add_argument("--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set")
    parser.add_argument("--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner")
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only "
            "informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model", default=recopy_gpt2, type=str, help="Reset the model to the original pretrained GPT-2 weights after each iteration"
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
main()
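For clarity, the filtering rule buried in `finetune` can be stated in isolation: a context is kept whenever the secondary learner's predicted IG(X) clears a threshold, and after the first ten optimizer steps the threshold drops to -1, which effectively lets every context through. A standalone sketch of that decision (a hypothetical helper, not part of the script above):

def keep_context(predicted_q: float, global_step: int, initial_threshold: float = 1.0) -> bool:
    # Same rule as the training loop: contexts with predicted IG below the
    # threshold are filtered out; after 10 batches the threshold is relaxed to -1.
    threshold = initial_threshold if global_step < 10 else -1.0
    return predicted_q >= threshold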
| 278 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertEqual(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

        comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
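The `// 4` in the mask-logits assertion reflects the pixel decoder producing masks at a quarter of the input resolution. A quick numeric check under the tester's defaults (batch_size=2, num_queries=10, num_labels=4, min_size=max_size=256):

batch_size, num_queries, num_labels = 2, 10, 4
min_size = max_size = 32 * 8  # 256
masks_shape = (batch_size, num_queries, min_size // 4, max_size // 4)  # (2, 10, 64, 64)
classes_shape = (batch_size, num_queries, num_labels + 1)              # (2, 10, 5); "+1" is the null class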
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
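The two logit tensors checked above are what post-processing combines into a segmentation map. A rough sketch of that reduction (simplified; the real `Mask2FormerImageProcessor.post_process_semantic_segmentation` additionally resizes to the target size):

import torch


def naive_semantic_map(class_queries_logits, masks_queries_logits):
    # Drop the null class, weight each query's mask by its class probabilities,
    # sum over queries, and take the argmax over classes.
    class_probs = class_queries_logits.softmax(dim=-1)[..., :-1]  # (B, Q, C)
    mask_probs = masks_queries_logits.sigmoid()                   # (B, Q, H, W)
    segmentation = torch.einsum("bqc,bqhw->bchw", class_probs, mask_probs)
    return segmentation.argmax(dim=1)                             # (B, H, W)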
| 23 | 0 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
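A quick shape check of the module above, with small dummy dimensions chosen purely for illustration: the extra context tokens are prepended along the sequence axis, so the text-state output grows by `clip_extra_context_tokens`.

import torch

proj = UnCLIPTextProjModel(
    clip_extra_context_tokens=4, clip_embeddings_dim=32, time_embed_dim=64, cross_attention_dim=16
)
text_states, time_emb = proj(
    image_embeddings=torch.randn(2, 32),
    prompt_embeds=torch.randn(2, 32),
    text_encoder_hidden_states=torch.randn(2, 7, 32),
    do_classifier_free_guidance=False,
)
print(text_states.shape)  # torch.Size([2, 11, 16]) -> 4 extra context tokens + 7 text tokens
print(time_emb.shape)     # torch.Size([2, 64])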
| 435 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
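A typical way to build such a composite configuration, using the standard transformers helpers (checkpoint names are just common examples):

from transformers import AutoConfig, VisionEncoderDecoderConfig

encoder_config = AutoConfig.from_pretrained("google/vit-base-patch16-224-in21k")
decoder_config = AutoConfig.from_pretrained("gpt2")
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
# The classmethod flips the decoder into cross-attention mode:
assert config.decoder.is_decoder and config.decoder.add_cross_attention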
| 23 | 0 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
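To make the three methods above concrete: for a sequence pair the layout is `<s> A </s> B </s>`, so the masks line up as follows (toy token IDs, purely illustrative):

# Suppose cls_token_id = 0, sep_token_id = 2, and the pieces are A = [11, 12], B = [21].
# build_inputs_with_special_tokens       -> [0, 11, 12, 2, 21, 2]
# get_special_tokens_mask                -> [1,  0,  0, 1,  0, 1]
# create_token_type_ids_from_sequences   -> [0,  0,  0, 0,  1, 1]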
| 3 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
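A typical invocation of this converter looks like the following (the script filename and all paths are placeholders for illustration):

# python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/mobilebert/model.ckpt \
#     --mobilebert_config_file /path/to/mobilebert_config.json \
#     --pytorch_dump_path /path/to/pytorch_model.bin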
| 23 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"

        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)

        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
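The seed handling in `get_dummy_inputs` exists because `torch.Generator` is device-bound and MPS does not support device-bound generators the same way CUDA does. A reusable version of that helper (a small sketch):

import torch


def seeded_generator(device: str, seed: int = 0) -> torch.Generator:
    # Fall back to the global CPU-seeded generator on Apple MPS; elsewhere,
    # create a generator pinned to the target device.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)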
| 69 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
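End-to-end, the behaviour these tests pin down reduces to a few lines with the real pipeline API (the checkpoint is the same tiny test model used above):

from transformers import pipeline

unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base")
preds = unmasker("My name is <mask>", top_k=2)          # list of {sequence, score, token, token_str}
pair = unmasker("My name is <mask> <mask>", top_k=2)    # one list of candidates per mask
targeted = unmasker("My name is <mask>", targets=[" Patrick", " Clara"])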
| 23 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
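The assertion checks a summed log-likelihood: the model's loss is a per-token mean cross-entropy, so multiplying by the label length and negating recovers the total log-probability of the target sequence. With hypothetical numbers (not the actual values from this checkpoint):

loss_per_token = 5.3078        # hypothetical mean cross-entropy from model(...)
num_label_tokens = 16          # hypothetical labels.shape[-1]
score = -(num_label_tokens * loss_per_token)  # about -84.92, the quantity compared to EXPECTED_SCORE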
| 171 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
def _UpperCAmelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
UpperCamelCase_ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , )
torch.manual_seed(0 )
UpperCamelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCamelCase_ = CLIPTextModel(_UpperCAmelCase )
UpperCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCamelCase_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase=0 ) -> List[Any]:
if str(_UpperCAmelCase ).startswith('mps' ):
UpperCamelCase_ = torch.manual_seed(_UpperCAmelCase )
else:
UpperCamelCase_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
UpperCamelCase_ = {
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def _UpperCAmelCase ( self ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
UpperCamelCase_ = sag_pipe.to(_UpperCAmelCase )
sag_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCamelCase_ = '.'
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = sag_pipe(
[prompt] , generator=_UpperCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
UpperCamelCase_ = output.images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
UpperCamelCase_ = sag_pipe.to(_UpperCAmelCase )
sag_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCamelCase_ = '.'
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = sag_pipe(
[prompt] , generator=_UpperCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
UpperCamelCase_ = output.images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
UpperCamelCase_ = sag_pipe.to(_UpperCAmelCase )
sag_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCamelCase_ = '.'
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = sag_pipe(
[prompt] , width=768 , height=512 , generator=_UpperCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
UpperCamelCase_ = output.images
assert image.shape == (1, 512, 768, 3)
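

# Added usage sketch (illustrative, not one of the original tests): the two
# knobs exercised above are `guidance_scale` (classifier-free guidance) and
# `sag_scale` (self-attention guidance). It needs a GPU and a weights download,
# so it is wrapped in a function instead of running at import time.
def _sag_usage_sketch():
    pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
    generator = torch.Generator(device="cuda").manual_seed(0)
    return pipe(
        "a photo of an astronaut",
        generator=generator,
        guidance_scale=7.5,
        sag_scale=0.75,
        num_inference_steps=20,
    ).images[0]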
| 23 | 0 |
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    # build an affine map from two sets of corresponding points and apply it
    rotation_matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, rotation_matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
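
    # Added sketch (illustrative angle and scale): a pure rotation about the
    # image centre can also be built directly, instead of from point pairs.
    center = (img_cols / 2, img_rows / 2)
    rotation_matrix = cv2.getRotationMatrix2D(center, 45, 1.0)  # degrees, unit scale
    rotated_45 = cv2.warpAffine(gray_img, rotation_matrix, (img_cols, img_rows))
    print(rotated_45.shape)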
| 36 |
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(curr_pos, parent_position)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
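

if __name__ == "__main__":
    # Added usage sketch (illustrative weights): build a small undirected graph
    # and run Prim's algorithm; `parent` encodes the minimum spanning tree.
    graph = GraphUndirectedWeighted[str]()
    graph.add_edge("a", "b", 3)
    graph.add_edge("b", "c", 10)
    graph.add_edge("a", "c", 15)
    graph.add_edge("b", "d", 100)
    dist, parent = prims_algo(graph)
    print(parent)  # e.g. {'a': None, 'b': 'a', 'c': 'b', 'd': 'b'}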
| 23 | 0 |
"""
Project Euler Problem 72: count the reduced proper fractions n/d for d <= 1_000_000.
The answer is the sum of Euler's totient phi(d) over 2 <= d <= limit, computed
here with a sieve.
"""


def solution(limit: int = 1_000_000) -> int:
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so correct the totients of its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
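
    # Added sanity check (illustrative): for a small limit, compare the sieve
    # against a brute-force count of fractions in lowest terms.
    from math import gcd

    small_limit = 8
    brute = sum(1 for d in range(2, small_limit + 1) for n in range(1, d) if gcd(n, d) == 1)
    assert solution(small_limit) == brute == 21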
| 331 |
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
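
    # Added aside (illustrative): for caching function results rather than an
    # explicit key store, the standard library provides functools.lru_cache.
    from functools import lru_cache as functools_lru_cache

    @functools_lru_cache(maxsize=4)
    def square(x: int) -> int:
        return x * x

    square(2)
    square(3)
    square(2)  # second lookup is a cache hit
    print(square.cache_info())  # hits=1, misses=2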
| 23 | 0 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
lowerCAmelCase_ = {
"""allenai/led-base-16384""": 1_6_3_8_4,
}
class A (UpperCAmelCase__ ):
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE = LEDTokenizer
_SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""]
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="replace" , lowercase_="<s>" , lowercase_="</s>" , lowercase_="</s>" , lowercase_="<s>" , lowercase_="<unk>" , lowercase_="<pad>" , lowercase_="<mask>" , lowercase_=False , lowercase_=True , **lowercase_ , ) -> Tuple:
'''simple docstring'''
super().__init__(
_UpperCAmelCase , _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase , **_UpperCAmelCase , )
_snake_case : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _UpperCAmelCase ) != add_prefix_space:
_snake_case : List[Any] = getattr(_UpperCAmelCase , pre_tok_state.pop('''type''' ) )
_snake_case : Tuple = add_prefix_space
_snake_case : Tuple = pre_tok_class(**_UpperCAmelCase )
_snake_case : Optional[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_snake_case : str = '''post_processor'''
_snake_case : Tuple = getattr(self.backend_tokenizer , _UpperCAmelCase , _UpperCAmelCase )
if tokenizer_component_instance:
_snake_case : Optional[Any] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_snake_case : List[str] = tuple(state['''sep'''] )
if "cls" in state:
_snake_case : List[str] = tuple(state['''cls'''] )
_snake_case : Union[str, Any] = False
if state.get('''add_prefix_space''' , _UpperCAmelCase ) != add_prefix_space:
_snake_case : List[Any] = add_prefix_space
_snake_case : Optional[Any] = True
if state.get('''trim_offsets''' , _UpperCAmelCase ) != trim_offsets:
_snake_case : Optional[int] = trim_offsets
_snake_case : Any = True
if changes_to_apply:
_snake_case : Optional[int] = getattr(_UpperCAmelCase , state.pop('''type''' ) )
_snake_case : Optional[int] = component_class(**_UpperCAmelCase )
setattr(self.backend_tokenizer , _UpperCAmelCase , _UpperCAmelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def __a ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __a ( self , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
_snake_case : Optional[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else value
_snake_case : Dict = value
def __a ( self , *lowercase_ , **lowercase_ ) -> BatchEncoding:
'''simple docstring'''
_snake_case : List[Any] = kwargs.get('''is_split_into_words''' , _UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_UpperCAmelCase , **_UpperCAmelCase )
def __a ( self , *lowercase_ , **lowercase_ ) -> BatchEncoding:
'''simple docstring'''
_snake_case : str = kwargs.get('''is_split_into_words''' , _UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_UpperCAmelCase , **_UpperCAmelCase )
def __a ( self , lowercase_ , lowercase_ = None ) -> Tuple[str]:
'''simple docstring'''
_snake_case : Any = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
def __a ( self , lowercase_ , lowercase_=None ) -> Optional[Any]:
'''simple docstring'''
_snake_case : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __a ( self , lowercase_ , lowercase_ = None ) -> List[int]:
'''simple docstring'''
_snake_case : int = [self.sep_token_id]
_snake_case : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __a ( self , lowercase_ , lowercase_ = None , lowercase_ = PaddingStrategy.DO_NOT_PAD , lowercase_ = None , lowercase_ = None , ) -> dict:
'''simple docstring'''
_snake_case : str = super()._pad(
encoded_inputs=_UpperCAmelCase , max_length=_UpperCAmelCase , padding_strategy=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
_snake_case : List[str] = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_snake_case : int = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_snake_case : Union[str, Any] = len(encoded_inputs['''global_attention_mask'''] ) != len(_UpperCAmelCase )
if needs_to_be_padded:
_snake_case : List[Any] = len(_UpperCAmelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_snake_case : Optional[Any] = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
_snake_case : str = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
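

# Added usage sketch (illustrative; downloads a checkpoint): the custom `_pad`
# above keeps `global_attention_mask` aligned with `input_ids` by extending it
# with -1 on the padding side.
def _led_global_attention_padding_sketch():
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("allenai/led-base-16384")
    enc = tok(["short text", "a somewhat longer piece of text"])
    # global attention on the first token of every sequence
    enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
    padded = tok.pad(enc, padding="longest")
    return padded["global_attention_mask"]  # the shorter row now ends in -1s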
| 326 |
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    # SiLU / swish: x * sigmoid(x)
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
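
    # Added spot checks (illustrative values): sigmoid(0) = 0.5, so SiLU(0) = 0.
    x = np.array([-1.0, 0.0, 1.0])
    print(sigmoid(x))              # ~[0.2689, 0.5, 0.7311]
    print(sigmoid_linear_unit(x))  # ~[-0.2689, 0.0, 0.7311]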
| 23 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token"],
        required=True,
        type=str,
        help="RAG model type: rag_sequence, rag_token",
    )
    parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
    parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
    parser.add_argument(
        "--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
    )
    parser.add_argument(
        "--generator_tokenizer_name_or_path",
        type=str,
        help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
    )
    parser.add_argument(
        "--question_encoder_tokenizer_name_or_path",
        type=str,
        help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
    )
    parser.add_argument(
        "--config_name_or_path",
        type=str,
        help=(
            "Identifier of the model config to use, if not provided, resolves to a base config for a given"
            " ``model_type``"
        ),
    )

    args = parser.parse_args()

    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)

    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
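
    # Added example invocation (illustrative checkpoints and paths):
    #
    #   python consolidate_rag_checkpoint.py \
    #       --model_type rag_sequence \
    #       --generator_name_or_path facebook/bart-large-cnn \
    #       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
    #       --dest ./rag-sequence-checkpoint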
| 169 |
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
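
    # Added spot checks (illustrative): known Western Easter dates.
    assert gauss_easter(2000) == datetime(2000, 4, 23)
    assert gauss_easter(2021) == datetime(2021, 4, 4)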
| 23 | 0 |
import os
import time

import numpy as np
import onnxruntime as ort

# ONNX Runtime TensorRT execution-provider switches (INT8 calibration and engine caching)
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"

sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
predict = {}
for _ in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
| 597 |
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
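

# Added variant (illustrative, not in the original module): the same POST with a
# network timeout, so a stalled webhook endpoint cannot block the caller forever.
def send_slack_message_with_timeout(message_body: str, slack_url: str, timeout: float = 5.0) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers, timeout=timeout)
    response.raise_for_status()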
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 23 | 0 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class A_ ( unittest.TestCase ):
_SCREAMING_SNAKE_CASE = MODEL_FOR_MASKED_LM_MAPPING
_SCREAMING_SNAKE_CASE = TF_MODEL_FOR_MASKED_LM_MAPPING
def _UpperCAmelCase ( self : int ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _UpperCAmelCase ( self : Tuple ):
__a = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
__a = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1E-05, "token": 3_80_15, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1E-05, "token": 2_55_06, "token_str": " accuser"},
] , )
__a = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1E-05,
"token": 3_80_15,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1E-05,
"token": 2_55_06,
"token_str": " accuser",
},
] , )
__a = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2E-05, "token": 1_36_06, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2E-05, "token": 34_99, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9E-05, "token": 29_41, "token_str": " Te"},
] , )
@require_torch
def _UpperCAmelCase ( self : Dict ):
__a = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
__a = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2E-05, "token": 3_56_76, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2E-05, "token": 1_64_16, "token_str": "ELS"},
] , )
__a = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2E-05,
"token": 3_56_76,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2E-05, "token": 1_64_16, "token_str": "ELS"},
] , )
__a = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1E-05, "token": 34_99, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2E-05, "token": 29_41, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2E-05, "token": 1_36_06, "token_str": " Clara"},
] , )
__a = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
[
{
"score": 2.2E-05,
"token": 3_56_76,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2E-05, "token": 1_64_16, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2E-05,
"token": 3_56_76,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2E-05, "token": 1_64_16, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def _UpperCAmelCase ( self : Any ):
__a = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
__a = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
@slow
@require_torch
def _UpperCAmelCase ( self : Optional[Any] ):
__a = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(_UpperCAmelCase )
@slow
@require_tf
def _UpperCAmelCase ( self : int ):
__a = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(_UpperCAmelCase )
def _UpperCAmelCase ( self : int , __SCREAMING_SNAKE_CASE : str ):
__a = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , [
{"sequence": "My name is John", "score": 0.0_08, "token": 6_10, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_07, "token": 15_73, "token_str": " Chris"},
] , )
__a = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_51,
"token": 22_01,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_14,
"token": 1_27_90,
"token_str": " Lyon",
},
] , )
__a = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , [
{"sequence": "My name is Patrick", "score": 0.0_05, "token": 34_99, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_00, "token": 1_36_06, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_00, "token": 29_41, "token_str": " Te"},
] , )
@require_torch
def _UpperCAmelCase ( self : List[Any] ):
__a = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
__a = None
__a = None
self.run_pipeline_test(_UpperCAmelCase , [] )
@require_tf
def _UpperCAmelCase ( self : Optional[Any] ):
__a = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
__a = None
__a = None
self.run_pipeline_test(_UpperCAmelCase , [] )
def _UpperCAmelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ):
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
__a = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
__a = [
f"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def _UpperCAmelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ):
__a = fill_masker.tokenizer
__a = fill_masker.model
__a = fill_masker(
f"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
_UpperCAmelCase , [
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
] , )
__a = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
_UpperCAmelCase , [
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
] , )
__a = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
_UpperCAmelCase , [
[
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
],
[
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
],
] , )
with self.assertRaises(_UpperCAmelCase ):
fill_masker([None] )
        # Inputs that contain no mask_token are not supported
with self.assertRaises(_UpperCAmelCase ):
fill_masker("This is" )
self.run_test_top_k(_UpperCAmelCase , _UpperCAmelCase )
self.run_test_targets(_UpperCAmelCase , _UpperCAmelCase )
self.run_test_top_k_targets(_UpperCAmelCase , _UpperCAmelCase )
self.fill_mask_with_duplicate_targets_and_top_k(_UpperCAmelCase , _UpperCAmelCase )
self.fill_mask_with_multiple_masks(_UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple ):
__a = tokenizer.get_vocab()
__a = sorted(vocab.keys() )[:2]
# Pipeline argument
__a = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , targets=_UpperCAmelCase )
__a = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
_UpperCAmelCase , [
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
] , )
__a = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , _UpperCAmelCase )
__a = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(_UpperCAmelCase ) )
# Call argument
__a = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
__a = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
] , )
__a = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , _UpperCAmelCase )
__a = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(_UpperCAmelCase ) )
# Score equivalence
__a = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase )
__a = [top_mask["token_str"] for top_mask in outputs]
__a = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_UpperCAmelCase ) == set(_UpperCAmelCase ):
__a = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase )
__a = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )
# Raises with invalid
with self.assertRaises(_UpperCAmelCase ):
__a = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] )
            # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_UpperCAmelCase ):
__a = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[""] )
with self.assertRaises(_UpperCAmelCase ):
__a = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets="" )
def _UpperCAmelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] ):
__a = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , top_k=2 )
__a = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
_UpperCAmelCase , [
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
] , )
__a = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
__a = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
_UpperCAmelCase , [
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
] , )
self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )
def _UpperCAmelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any ):
__a = tokenizer.get_vocab()
__a = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
# top_k=2, ntargets=3
__a = sorted(vocab.keys() )[:3]
__a = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=_UpperCAmelCase )
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
__a = [el["token_str"] for el in sorted(_UpperCAmelCase , key=lambda __SCREAMING_SNAKE_CASE : x["score"] , reverse=_UpperCAmelCase )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_UpperCAmelCase ).issubset(_UpperCAmelCase ):
__a = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=_UpperCAmelCase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )
def _UpperCAmelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ):
__a = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
__a = tokenizer.get_vocab()
# String duplicates + id duplicates
__a = sorted(vocab.keys() )[:3]
__a = [targets[0], targets[1], targets[0], targets[2], targets[1]]
__a = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=_UpperCAmelCase , top_k=10 )
        # The target list contains duplicates, so the pipeline cannot return
        # more predictions than there are unique targets
self.assertEqual(len(_UpperCAmelCase ) , 3 )
def _UpperCAmelCase ( self : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int ):
__a = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
__a = fill_masker(
f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
_UpperCAmelCase , [
[
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
],
[
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
],
[
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
{"sequence": ANY(_UpperCAmelCase ), "score": ANY(_UpperCAmelCase ), "token": ANY(_UpperCAmelCase ), "token_str": ANY(_UpperCAmelCase )},
],
] , )
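

# Added usage sketch (illustrative; downloads a checkpoint): the behaviours the
# tests above exercise, in plain pipeline form.
def _fill_mask_usage_sketch():
    unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2)
    preds = unmasker("The capital of France is <mask>.")  # dicts with sequence/score/token/token_str
    targeted = unmasker("The capital of France is <mask>.", targets=[" Paris", " Lyon"])
    return preds, targeted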
| 197 |
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 23 | 0 |
from ...configuration_utils import PretrainedConfig


class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50_358,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
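

if __name__ == "__main__":
    # Added usage sketch (illustrative overrides): instantiate the config and
    # override a couple of defaults, as with any PretrainedConfig subclass.
    config = BertGenerationConfig(hidden_size=512, num_hidden_layers=6)
    print(config.hidden_size, config.num_hidden_layers, config.model_type)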
| 278 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def _snake_case (__lowercase=32 , __lowercase=10 , __lowercase=100 , __lowercase=1026 , __lowercase=True , __lowercase="data/tokenized_stories_train_wikitext103.jbl" , __lowercase="igf_context_pairs.jbl" , ):
set_seed(3)
# generate train_data and objective_set
UpperCamelCase_ , UpperCamelCase_ = generate_datasets(
__lowercase , __lowercase , number=__lowercase , min_len=1026 , trim=__lowercase)
# keeps model same across runs
set_seed(4)
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
UpperCamelCase_ = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# load pretrained model
UpperCamelCase_ = load_gpta('gpt2').to(__lowercase)
print('computing perplexity on objective set')
UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase).item()
print('perplexity on objective set:' , __lowercase)
# collect igf pairs and save to file demo.jbl
collect_objective_set(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def _snake_case (__lowercase , __lowercase=15 , __lowercase=128 , __lowercase=100 , __lowercase="igf_model.pt" , ):
set_seed(42)
# Load pre-trained model
UpperCamelCase_ = GPTaLMHeadModel.from_pretrained('gpt2')
# Initialize secondary learner to use embedding weights of model
UpperCamelCase_ = SecondaryLearner(__lowercase)
# Train secondary learner
UpperCamelCase_ = train_secondary_learner(
__lowercase , __lowercase , max_epochs=__lowercase , batch_size=__lowercase , eval_freq=100 , igf_model_path=__lowercase , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase=32 , __lowercase=1000 , __lowercase=16 , __lowercase=1.0 , __lowercase=recopy_gpta , __lowercase=None , __lowercase=10 , __lowercase="gpt2_finetuned.pt" , ):
UpperCamelCase_ = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
UpperCamelCase_ = RandomSampler(__lowercase)
UpperCamelCase_ = DataLoader(__lowercase , sampler=__lowercase)
UpperCamelCase_ = max_steps // (len(__lowercase)) + 1
UpperCamelCase_ = 0
UpperCamelCase_ = torch.zeros((1, context_len) , dtype=torch.long , device=__lowercase)
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = recopy_model(__lowercase , __lowercase , __lowercase)
model.train()
if secondary_learner is not None:
secondary_learner.to(__lowercase)
secondary_learner.eval()
UpperCamelCase_ = []
UpperCamelCase_ = 0
UpperCamelCase_ = []
UpperCamelCase_ = []
# Compute the performance of the transformer model at the beginning
UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase)
test_perps.append(__lowercase)
print('Test perplexity, step' , __lowercase , ':' , __lowercase)
for epoch in range(int(__lowercase)):
for step, example in enumerate(__lowercase):
torch.cuda.empty_cache()
UpperCamelCase_ = random.randint(0 , example.size(2) - context_len - 1)
UpperCamelCase_ = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
UpperCamelCase_ = model(__lowercase , labels=__lowercase)
UpperCamelCase_ = True
if secondary_learner is not None:
UpperCamelCase_ = secondary_learner.forward(
torch.tensor(__lowercase , dtype=torch.long , device=__lowercase).unsqueeze(0))[0].item()
observed_qs.append(float(__lowercase))
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
UpperCamelCase_ = -1
if predicted_q < threshold:
UpperCamelCase_ = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu()))
UpperCamelCase_ = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
UpperCamelCase_ = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0)
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase)
test_perps.append(__lowercase)
print('Test perplexity, step' , __lowercase , ':' , __lowercase)
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , __lowercase)
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def _snake_case ():
UpperCamelCase_ = argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task')
# Required parameters
parser.add_argument(
'--data_dir' , default=__lowercase , type=__lowercase , required=__lowercase , help='The input data dir. Should contain data files for WikiText.' , )
parser.add_argument(
'--model_name_or_path' , default=__lowercase , type=__lowercase , required=__lowercase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--data_file' , type=__lowercase , default=__lowercase , help=(
'A jbl file containing tokenized data which can be split as objective dataset, '
'train_dataset and test_dataset.'
) , )
parser.add_argument(
'--igf_data_file' , type=__lowercase , default=__lowercase , help='A jbl file containing the context and information gain pairs to train secondary learner.' , )
parser.add_argument(
'--output_dir' , default=__lowercase , type=__lowercase , required=__lowercase , help='The output directory where the final fine-tuned model is stored.' , )
parser.add_argument(
'--tokenizer_name' , default=__lowercase , type=__lowercase , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument('--seed' , type=__lowercase , default=__lowercase , help='A seed for reproducible training.')
parser.add_argument(
'--context_len' , default=32 , type=__lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--size_objective_set' , default=100 , type=__lowercase , help='number of articles that are long enough to be used as our objective set' , )
parser.add_argument(
'--eval_freq' , default=100 , type=__lowercase , help='secondary model evaluation is triggered at eval_freq')
parser.add_argument('--max_steps' , default=1000 , type=__lowercase , help='To calculate training epochs')
parser.add_argument(
'--secondary_learner_batch_size' , default=128 , type=__lowercase , help='batch size of training data for secondary learner' , )
parser.add_argument(
'--batch_size' , default=16 , type=__lowercase , help='batch size of training data of language model(gpt2) ')
parser.add_argument(
'--eval_interval' , default=10 , type=__lowercase , help=(
'decay the selectivity of our secondary learner filter from'
'1 standard deviation above average to 1 below average after 10 batches'
) , )
parser.add_argument(
'--number' , default=100 , type=__lowercase , help='The number of examples split to be used as objective_set/test_data')
parser.add_argument(
'--min_len' , default=1026 , type=__lowercase , help='The minimum length of the article to be used as objective set')
parser.add_argument(
'--secondary_learner_max_epochs' , default=15 , type=__lowercase , help='number of epochs to train secondary learner')
parser.add_argument('--trim' , default=__lowercase , type=__lowercase , help='truncate the example if it exceeds context length')
parser.add_argument(
'--threshold' , default=1.0 , type=__lowercase , help=(
'The threshold value used by secondary learner to filter the train_data and allow only'
' informative data as input to the model'
) , )
parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=__lowercase , help='finetuned_model_name')
parser.add_argument(
'--recopy_model' , default=__lowercase , type=__lowercase , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=__lowercase , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , )
# Load train data for secondary learner
UpperCamelCase_ = joblib.load('data/IGF_values.jbl')
# Train secondary learner
UpperCamelCase_ = training_secondary_learner(
__lowercase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='igf_model.pt' , )
# load pretrained gpt2 model
UpperCamelCase_ = GPTaLMHeadModel.from_pretrained('gpt2')
set_seed(42)
# Generate train and test data to train and evaluate gpt2 model
UpperCamelCase_ , UpperCamelCase_ = generate_datasets(
context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=100 , min_len=1026 , trim=__lowercase)
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
__lowercase , __lowercase , __lowercase , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=__lowercase , secondary_learner=__lowercase , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , )
if __name__ == "__main__":
main()
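# Note (assumption): `main()` expects `data/tokenized_stories_train_wikitext103.jbl` and
# `data/IGF_values.jbl` to already exist; they are presumably produced by an earlier
# tokenization / IGF-value collection step that is not shown in this snippet.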
| 23 | 0 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowerCAmelCase_ : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(
UpperCAmelCase__ , R'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' , )
class SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ):
'''simple docstring'''
def snake_case__ ( self : Union[str, Any] , lowercase__ : Tuple ) ->np.ndarray:
'''simple docstring'''
if self.framework == "tf":
_UpperCamelCase : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
_UpperCamelCase : int = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_UpperCAmelCase )
else:
raise ValueError("Unsupported framework" )
return masked_index
def snake_case__ ( self : Any , lowercase__ : int ) ->np.ndarray:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self.get_masked_index(_UpperCAmelCase )
_UpperCamelCase : Dict = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , f'''No mask_token ({self.tokenizer.mask_token}) found on the input''' , )
def snake_case__ ( self : List[str] , lowercase__ : Union[str, Any] ) ->Any:
'''simple docstring'''
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_UpperCAmelCase )
def snake_case__ ( self : Tuple , lowercase__ : List[str] , lowercase__ : Any=None , **lowercase__ : int ) ->Dict[str, GenericTensor]:
'''simple docstring'''
if return_tensors is None:
_UpperCamelCase : List[str] = self.framework
_UpperCamelCase : Optional[int] = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.ensure_exactly_one_mask_token(_UpperCAmelCase )
return model_inputs
def snake_case__ ( self : List[Any] , lowercase__ : str ) ->List[str]:
'''simple docstring'''
_UpperCamelCase : List[Any] = self.model(**_UpperCAmelCase )
_UpperCamelCase : Dict = model_inputs["input_ids"]
return model_outputs
def snake_case__ ( self : Optional[int] , lowercase__ : Optional[int] , lowercase__ : Union[str, Any]=5 , lowercase__ : Any=None ) ->Any:
'''simple docstring'''
if target_ids is not None and target_ids.shape[0] < top_k:
_UpperCamelCase : Optional[int] = target_ids.shape[0]
_UpperCamelCase : Any = model_outputs["input_ids"][0]
_UpperCamelCase : str = model_outputs["logits"]
if self.framework == "tf":
_UpperCamelCase : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
_UpperCamelCase : Dict = outputs.numpy()
_UpperCamelCase : int = outputs[0, masked_index, :]
_UpperCamelCase : Dict = stable_softmax(_UpperCAmelCase , axis=-1 )
if target_ids is not None:
_UpperCamelCase : int = tf.gather_nd(tf.squeeze(_UpperCAmelCase , 0 ) , target_ids.reshape(-1 , 1 ) )
_UpperCamelCase : Any = tf.expand_dims(_UpperCAmelCase , 0 )
_UpperCamelCase : Tuple = tf.math.top_k(_UpperCAmelCase , k=_UpperCAmelCase )
_UpperCamelCase , _UpperCamelCase : List[str] = topk.values.numpy(), topk.indices.numpy()
else:
_UpperCamelCase : List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_UpperCAmelCase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
_UpperCamelCase : Tuple = outputs[0, masked_index, :]
_UpperCamelCase : int = logits.softmax(dim=-1 )
if target_ids is not None:
_UpperCamelCase : List[str] = probs[..., target_ids]
_UpperCamelCase , _UpperCamelCase : List[str] = probs.topk(_UpperCAmelCase )
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : Optional[Any] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
_UpperCamelCase : List[str] = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
_UpperCamelCase : Optional[Any] = input_ids.numpy().copy()
if target_ids is not None:
_UpperCamelCase : str = target_ids[p].tolist()
_UpperCamelCase : List[str] = p
# Filter padding out:
_UpperCamelCase : Union[str, Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
_UpperCamelCase : Any = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
_UpperCamelCase : Tuple = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
row.append(_UpperCAmelCase )
result.append(_UpperCAmelCase )
if single_mask:
return result[0]
return result
def snake_case__ ( self : Union[str, Any] , lowercase__ : List[Any] , lowercase__ : int=None ) ->Any:
'''simple docstring'''
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_UpperCamelCase : List[str] = [targets]
try:
_UpperCamelCase : Dict = self.tokenizer.get_vocab()
except Exception:
_UpperCamelCase : Dict = {}
_UpperCamelCase : List[Any] = []
for target in targets:
_UpperCamelCase : Any = vocab.get(_UpperCAmelCase , _UpperCAmelCase )
if id_ is None:
_UpperCamelCase : Optional[Any] = self.tokenizer(
_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , max_length=1 , truncation=_UpperCAmelCase , )["input_ids"]
if len(_UpperCAmelCase ) == 0:
logger.warning(
f'''The specified target token `{target}` does not exist in the model vocabulary. '''
"We cannot replace it with anything meaningful, ignoring it" )
continue
_UpperCamelCase : Optional[int] = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f'''The specified target token `{target}` does not exist in the model vocabulary. '''
f'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' )
target_ids.append(id_ )
_UpperCamelCase : int = list(set(_UpperCAmelCase ) )
if len(_UpperCAmelCase ) == 0:
raise ValueError("At least one target must be provided when passed." )
_UpperCamelCase : Any = np.array(_UpperCAmelCase )
return target_ids
def snake_case__ ( self : str , lowercase__ : str=None , lowercase__ : str=None ) ->List[str]:
'''simple docstring'''
_UpperCamelCase : List[Any] = {}
if targets is not None:
_UpperCamelCase : Union[str, Any] = self.get_target_ids(_UpperCAmelCase , _UpperCAmelCase )
_UpperCamelCase : int = target_ids
if top_k is not None:
_UpperCamelCase : str = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
return {}, {}, postprocess_params
def __call__( self : Dict , lowercase__ : Optional[Any] , *lowercase__ : Tuple , **lowercase__ : int ) ->Any:
'''simple docstring'''
_UpperCamelCase : str = super().__call__(_UpperCAmelCase , **_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) == 1:
return outputs[0]
return outputs
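# Minimal usage sketch for a fill-mask pipeline (assumes the "bert-base-uncased"
# checkpoint can be downloaded; any masked-LM checkpoint would do):
#
#     from transformers import pipeline
#     unmasker = pipeline("fill-mask", model="bert-base-uncased")
#     unmasker("Paris is the [MASK] of France.", top_k=2)
#
# Each returned dict carries "score", "token", "token_str" and "sequence", matching
# the keys assembled in the postprocessing method above.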
| 435 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class _a :
"""simple docstring"""
A_ = MBartConfig
A_ = {}
A_ = """gelu"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=20 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , ) -> Union[str, Any]:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = eos_token_id
UpperCamelCase_ = pad_token_id
UpperCamelCase_ = bos_token_id
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCamelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCamelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCamelCase_ = prepare_mbart_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return config, inputs_dict
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
UpperCamelCase_ = TFMBartModel(config=_UpperCAmelCase ).get_decoder()
UpperCamelCase_ = inputs_dict['input_ids']
UpperCamelCase_ = input_ids[:1, :]
UpperCamelCase_ = inputs_dict['attention_mask'][:1, :]
UpperCamelCase_ = inputs_dict['head_mask']
UpperCamelCase_ = 1
# first forward pass
UpperCamelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase )
UpperCamelCase_ , UpperCamelCase_ = outputs.to_tuple()
UpperCamelCase_ = past_key_values[1]
def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ):
if attention_mask is None:
UpperCamelCase_ = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id) , tf.inta)
if decoder_attention_mask is None:
UpperCamelCase_ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id) , tf.inta),
] , axis=-1 , )
if head_mask is None:
UpperCamelCase_ = tf.ones((config.encoder_layers, config.encoder_attention_heads))
if decoder_head_mask is None:
UpperCamelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads))
if cross_attn_head_mask is None:
UpperCamelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
A_ = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
A_ = (
{
"""conversational""": TFMBartForConditionalGeneration,
"""feature-extraction""": TFMBartModel,
"""summarization""": TFMBartForConditionalGeneration,
"""text2text-generation""": TFMBartForConditionalGeneration,
"""translation""": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
A_ = True
A_ = False
A_ = False
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = TFMBartModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
A_ = [
""" UN Chief Says There Is No Military Solution in Syria""",
]
A_ = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
]
A_ = """facebook/mbart-large-en-ro"""
@cached_property
def _UpperCAmelCase ( self ) -> Any:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def _UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def _UpperCAmelCase ( self , **_UpperCAmelCase ) -> int:
UpperCamelCase_ = self.translate_src_text(**_UpperCAmelCase )
self.assertListEqual(self.expected_text , _UpperCAmelCase )
def _UpperCAmelCase ( self , **_UpperCAmelCase ) -> List[str]:
UpperCamelCase_ = self.tokenizer(self.src_text , **_UpperCAmelCase , return_tensors='tf' )
UpperCamelCase_ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
UpperCamelCase_ = self.tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
return generated_words
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
self._assert_generated_batch_equal_expected()
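        # Note: because of the @slow marker above, this integration test is skipped by
        # default; in the transformers test suite such tests run only when RUN_SLOW=1
        # is set in the environment.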
| 23 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase : List[str] = {
"""alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__):
lowerCAmelCase_ = """mgp-str"""
def __init__( self , A_=[32, 128] , A_=4 , A_=3 , A_=27 , A_=38 , A_=50257 , A_=30522 , A_=768 , A_=12 , A_=12 , A_=4.0 , A_=True , A_=False , A_=1e-5 , A_=0.0 , A_=0.0 , A_=0.0 , A_=False , A_=0.02 , **A_ , )-> Optional[int]:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = max_token_length
UpperCamelCase = num_character_labels
UpperCamelCase = num_bpe_labels
UpperCamelCase = num_wordpiece_labels
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = mlp_ratio
UpperCamelCase = distilled
UpperCamelCase = layer_norm_eps
UpperCamelCase = drop_rate
UpperCamelCase = qkv_bias
UpperCamelCase = attn_drop_rate
UpperCamelCase = drop_path_rate
UpperCamelCase = output_aa_attentions
UpperCamelCase = initializer_range
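# Usage sketch (hedged: this snippet is a renamed copy of MgpstrConfig, so with the
# upstream transformers names it would read):
#
#     from transformers import MgpstrConfig
#     config = MgpstrConfig(max_token_length=27)  # defaults mirror alibaba-damo/mgp-str-base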
| 3 |
def _snake_case (__lowercase):
UpperCamelCase_ = 1
for i in range(1 , num + 1):
fact *= i
return fact
def _snake_case (__lowercase):
UpperCamelCase_ = 0
while number > 0:
UpperCamelCase_ = number % 10
sum_of_digits += last_digit
UpperCamelCase_ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def _snake_case (__lowercase = 100):
UpperCamelCase_ = factorial(__lowercase)
UpperCamelCase_ = split_and_add(__lowercase)
return result
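# Sanity check: factorial(10) == 3628800 and split_and_add(3628800) == 27,
# so solution(10) should return 27; solution() defaults to the digit sum of 100!.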
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 23 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
a : Optional[Any] = logging.get_logger(__name__)
a : Dict = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
a : Union[str, Any] = {
"""vocab_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
a : List[Any] = {
"""yjernite/retribert-base-uncased""": 512,
}
a : List[str] = {
"""yjernite/retribert-base-uncased""": {"""do_lower_case""": True},
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE = RetriBertTokenizer
__SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""]
def __init__( self : Dict , a_ : Any=None , a_ : int=None , a_ : Optional[Any]=True , a_ : Optional[int]="[UNK]" , a_ : int="[SEP]" , a_ : Union[str, Any]="[PAD]" , a_ : Union[str, Any]="[CLS]" , a_ : str="[MASK]" , a_ : List[str]=True , a_ : Tuple=None , **a_ : Any , ):
"""simple docstring"""
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenize_chinese_chars=_UpperCAmelCase , strip_accents=_UpperCAmelCase , **_UpperCAmelCase , )
__snake_case = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , _UpperCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , _UpperCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , _UpperCAmelCase ) != tokenize_chinese_chars
):
__snake_case = getattr(_UpperCAmelCase , normalizer_state.pop("type" ) )
__snake_case = do_lower_case
__snake_case = strip_accents
__snake_case = tokenize_chinese_chars
__snake_case = normalizer_class(**_UpperCAmelCase )
__snake_case = do_lower_case
def A ( self : Optional[int] , a_ : str , a_ : Dict=None ):
"""simple docstring"""
__snake_case = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A ( self : List[Any] , a_ : Union[str, Any] , a_ : str = None ):
"""simple docstring"""
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A ( self : Union[str, Any] , a_ : Union[str, Any] , a_ : List[str] = None ):
"""simple docstring"""
__snake_case = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
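# Usage sketch (hedged: with the upstream transformers name, RetriBertTokenizerFast):
#
#     tok = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
#     tok("how do rockets work?", return_tensors="pt")
#
# Like other BERT-style fast tokenizers, it pairs a WordPiece vocabulary with the
# tokenizers-library normalizer configured in __init__ above.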
| 69 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
snake_case__ : str = logging.get_logger(__name__)
def _snake_case (__lowercase):
if isinstance(__lowercase , (list, tuple)) and isinstance(videos[0] , (list, tuple)) and is_valid_image(videos[0][0]):
return videos
elif isinstance(__lowercase , (list, tuple)) and is_valid_image(videos[0]):
return [videos]
elif is_valid_image(__lowercase):
return [[videos]]
raise ValueError(f"""Could not make batched video from {videos}""")
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = ["""pixel_values"""]
def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> None:
super().__init__(**_UpperCAmelCase )
UpperCamelCase_ = size if size is not None else {'shortest_edge': 224}
UpperCamelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCamelCase_ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
UpperCamelCase_ = get_size_dict(_UpperCAmelCase , param_name='crop_size' )
UpperCamelCase_ = do_resize
UpperCamelCase_ = size
UpperCamelCase_ = do_center_crop
UpperCamelCase_ = crop_size
UpperCamelCase_ = resample
UpperCamelCase_ = do_rescale
UpperCamelCase_ = rescale_factor
UpperCamelCase_ = do_normalize
UpperCamelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> np.ndarray:
UpperCamelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "shortest_edge" in size:
UpperCamelCase_ = get_resize_output_image_size(_UpperCAmelCase , size['shortest_edge'] , default_to_square=_UpperCAmelCase )
elif "height" in size and "width" in size:
UpperCamelCase_ = (size['height'], size['width'])
else:
raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> np.ndarray:
UpperCamelCase_ = get_size_dict(_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(_UpperCAmelCase , size=(size['height'], size['width']) , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> int:
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> np.ndarray:
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
UpperCamelCase_ = to_numpy_array(_UpperCAmelCase )
if do_resize:
UpperCamelCase_ = self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase )
if do_center_crop:
UpperCamelCase_ = self.center_crop(_UpperCAmelCase , size=_UpperCAmelCase )
if do_rescale:
UpperCamelCase_ = self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase )
if do_normalize:
UpperCamelCase_ = self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase )
UpperCamelCase_ = to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase )
return image
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ) -> PIL.Image.Image:
UpperCamelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase_ = resample if resample is not None else self.resample
UpperCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase_ = image_std if image_std is not None else self.image_std
UpperCamelCase_ = size if size is not None else self.size
UpperCamelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCamelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCamelCase_ = get_size_dict(_UpperCAmelCase , param_name='crop_size' )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
UpperCamelCase_ = make_batched(_UpperCAmelCase )
UpperCamelCase_ = [
[
self._preprocess_image(
image=_UpperCAmelCase , do_resize=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , do_center_crop=_UpperCAmelCase , crop_size=_UpperCAmelCase , do_rescale=_UpperCAmelCase , rescale_factor=_UpperCAmelCase , do_normalize=_UpperCAmelCase , image_mean=_UpperCAmelCase , image_std=_UpperCAmelCase , data_format=_UpperCAmelCase , )
for img in video
]
for video in videos
]
UpperCamelCase_ = {'pixel_values': videos}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
| 23 | 0 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class __snake_case ( UpperCAmelCase__ ,unittest.TestCase ):
_a = WavaVecaPhonemeCTCTokenizer
_a = False
def UpperCAmelCase__ ( self : str):
super().setUp()
lowerCAmelCase_ : Optional[int] = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''')
lowerCAmelCase_ : Tuple = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase))))
lowerCAmelCase_ : List[str] = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}
lowerCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
fp.write(json.dumps(_UpperCAmelCase) + '''\n''')
def UpperCAmelCase__ ( self : int , A_ : int , A_ : Any=False , A_ : Any=2_0 , A_ : int=5):
lowerCAmelCase_ : str = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCAmelCase)) for i in range(len(_UpperCAmelCase))]
lowerCAmelCase_ : Union[str, Any] = list(filter(lambda A_: [t[0]] == tokenizer.encode(t[1] , do_phonemize=_UpperCAmelCase) , _UpperCAmelCase))
if max_length is not None and len(_UpperCAmelCase) > max_length:
lowerCAmelCase_ : List[Any] = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase) < min_length and len(_UpperCAmelCase) > 0:
while len(_UpperCAmelCase) < min_length:
lowerCAmelCase_ : Dict = toks + toks
# toks_str = [t[1] for t in toks]
lowerCAmelCase_ : Optional[Any] = [t[0] for t in toks]
# Ensure consistency
lowerCAmelCase_ : str = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase)
if " " not in output_txt and len(_UpperCAmelCase) > 1:
lowerCAmelCase_ : List[Any] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCAmelCase)
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCAmelCase)
)
if with_prefix_space:
lowerCAmelCase_ : List[Any] = ''' ''' + output_txt
lowerCAmelCase_ : List[str] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase)
return output_txt, output_ids
def UpperCAmelCase__ ( self : Tuple , **A_ : List[str]):
kwargs.update(self.special_tokens_map)
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase)
def UpperCAmelCase__ ( self : Optional[int]):
lowerCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
# check adding a single token
tokenizer.add_tokens('''xxx''')
lowerCAmelCase_ : str = tokenizer('''m xxx ɪ''' , do_phonemize=_UpperCAmelCase).input_ids
self.assertEqual(_UpperCAmelCase , [1_3, 3_9_2, 1_7]) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''])
lowerCAmelCase_ : List[str] = tokenizer('''m aaa ɪ ccc''' , do_phonemize=_UpperCAmelCase).input_ids
self.assertEqual(_UpperCAmelCase , [1_3, 3_9_3, 1_7, 3_9_5]) # aaa and ccc should be after xxx and 2 after aaa
lowerCAmelCase_ : str = tokenizer('''maɪ c''' , do_phonemize=_UpperCAmelCase).input_ids
self.assertEqual(_UpperCAmelCase , [3, 2_0_0]) # mai should be <unk> (=3)
def UpperCAmelCase__ ( self : int):
lowerCAmelCase_ : str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
lowerCAmelCase_ : Tuple = '''Hello how are you'''
lowerCAmelCase_ : Any = tokenizer.phonemize(_UpperCAmelCase , phonemizer_lang='''en-us''')
self.assertEqual(_UpperCAmelCase , '''h ə l oʊ h aʊ ɑːɹ j uː''')
def UpperCAmelCase__ ( self : str):
lowerCAmelCase_ : Tuple = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
lowerCAmelCase_ : List[str] = '''Hello how are you'''
lowerCAmelCase_ : int = tokenizer.phonemize(_UpperCAmelCase , phonemizer_lang='''en-us''')
self.assertEqual(tokenizer(_UpperCAmelCase).input_ids , tokenizer(_UpperCAmelCase , do_phonemize=_UpperCAmelCase).input_ids)
def UpperCAmelCase__ ( self : Optional[int]):
lowerCAmelCase_ : Tuple = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
lowerCAmelCase_ : List[str] = '''Hello how are you'''
lowerCAmelCase_ : Any = tokenizer.phonemize(_UpperCAmelCase , phonemizer_lang='''en-us''')
lowerCAmelCase_ : Tuple = tokenizer.decode(tokenizer(_UpperCAmelCase).input_ids)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
def UpperCAmelCase__ ( self : Union[str, Any]):
lowerCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
lowerCAmelCase_ : int = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8],
[2_4, 2_2, 5, 2_4, 2_2, 5, 7_7],
]
lowerCAmelCase_ : Optional[int] = tokenizer.decode(sample_ids[0])
lowerCAmelCase_ : Dict = tokenizer.batch_decode(_UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , batch_tokens[0])
self.assertEqual(_UpperCAmelCase , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''])
def UpperCAmelCase__ ( self : Optional[int]):
lowerCAmelCase_ : int = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''')
tokenizer.add_tokens('''|''')
lowerCAmelCase_ : Tuple = '''Hello how are you'''
lowerCAmelCase_ : str = tokenizer.phonemize(_UpperCAmelCase , phonemizer_lang='''en-us''')
self.assertEqual(_UpperCAmelCase , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''')
def UpperCAmelCase__ ( self : List[Any]):
lowerCAmelCase_ : str = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''')
tokenizer.add_tokens('''|''')
lowerCAmelCase_ : List[str] = '''Hello how are you'''
lowerCAmelCase_ : List[str] = tokenizer.phonemize(_UpperCAmelCase , phonemizer_lang='''en-us''')
self.assertEqual(tokenizer(_UpperCAmelCase).input_ids , tokenizer(_UpperCAmelCase , do_phonemize=_UpperCAmelCase).input_ids)
def UpperCAmelCase__ ( self : str):
lowerCAmelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''')
tokenizer.add_tokens('''|''')
# fmt: off
lowerCAmelCase_ : int = [
[1_1, 5, 1_5, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 1_5, 8, tokenizer.word_delimiter_token_id, 9_8],
[tokenizer.word_delimiter_token_id, 2_4, 2_2, tokenizer.word_delimiter_token_id, 5, 2_4, 2_2, 5, 7_7],
]
# fmt: on
# decode with word_del_token filter
lowerCAmelCase_ : Optional[int] = tokenizer.decode(sample_ids[0])
lowerCAmelCase_ : Optional[Any] = tokenizer.batch_decode(_UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , batch_tokens[0])
self.assertEqual(_UpperCAmelCase , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''])
# decode with no word_del_token filter
lowerCAmelCase_ : Union[str, Any] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=_UpperCAmelCase)
lowerCAmelCase_ : List[Any] = tokenizer.batch_decode(_UpperCAmelCase , filter_word_delimiter_token=_UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , batch_tokens[0])
self.assertEqual(_UpperCAmelCase , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''])
def UpperCAmelCase__ ( self : str):
lowerCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''')
tokenizer.add_tokens('''|''')
lowerCAmelCase_ : Optional[Any] = '''Hello how are you'''
lowerCAmelCase_ : int = tokenizer.phonemize(_UpperCAmelCase , phonemizer_lang='''en-us''')
lowerCAmelCase_ : str = tokenizer.decode(tokenizer(_UpperCAmelCase).input_ids , filter_word_delimiter_token=_UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
def UpperCAmelCase__ ( self : List[Any]):
lowerCAmelCase_ : Tuple = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''')
tokenizer.add_tokens('''|''')
lowerCAmelCase_ : Any = '''Hello how are you'''
lowerCAmelCase_ : str = tokenizer.phonemize(_UpperCAmelCase , phonemizer_lang='''en-us''')
lowerCAmelCase_ : Dict = tokenizer.decode(tokenizer(_UpperCAmelCase).input_ids , filter_word_delimiter_token=_UpperCAmelCase)
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''')]).strip() , _UpperCAmelCase)
def UpperCAmelCase__ ( self : Dict):
lowerCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=_UpperCAmelCase)
lowerCAmelCase_ : Any = '''Hello how are you'''
lowerCAmelCase_ : Optional[int] = tokenizer(_UpperCAmelCase , phonemizer_lang='''en-us''').input_ids
lowerCAmelCase_ : str = tokenizer(_UpperCAmelCase , phonemizer_lang='''fr-fr''').input_ids
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase)
lowerCAmelCase_ : Tuple = tokenizer.decode(_UpperCAmelCase)
lowerCAmelCase_ : Any = tokenizer.decode(_UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , '''h ə l oʊ h aʊ ɑːɹ j uː''')
self.assertEqual(_UpperCAmelCase , '''ɛ l o h aʊ a ʁ j u''')
def UpperCAmelCase__ ( self : Optional[int]):
lowerCAmelCase_ : Optional[int] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
lowerCAmelCase_ : Optional[int] = '''Hello how Are you'''
lowerCAmelCase_ : Optional[int] = '''hello how are you'''
lowerCAmelCase_ : str = tokenizer(_UpperCAmelCase).input_ids
lowerCAmelCase_ : str = tokenizer(_UpperCAmelCase).input_ids
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
def UpperCAmelCase__ ( self : Dict):
lowerCAmelCase_ : int = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
tokenizer.add_tokens(['''!''', '''?'''])
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''})
# fmt: off
lowerCAmelCase_ : Dict = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8, 3_9_2, 3_9_2, 3_9_3, 3_9_2, 3_9_2, 3_9_3, 3_9_4, 3_9_4],
[2_4, 2_2, 5, 2_4, 2_2, 5, 7_7, tokenizer.pad_token_id, 3_9_4, 3_9_4],
]
# fmt: on
lowerCAmelCase_ : Dict = tokenizer.batch_decode(_UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''])
@staticmethod
def UpperCAmelCase__ ( A_ : Optional[Any] , A_ : str):
lowerCAmelCase_ : Any = [d[key] for d in offsets]
return retrieved_list
def UpperCAmelCase__ ( self : Dict):
lowerCAmelCase_ : Any = self.get_tokenizer(word_delimiter_token='''|''')
tokenizer.add_tokens('''|''')
# fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
lowerCAmelCase_ : List[Any] = [1_1, 5, 5, 5, 1_5, 1_5, tokenizer.pad_token_id, 1_5, 1_5, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 1_5, 8, 8, 8, tokenizer.word_delimiter_token_id, 9_8]
# fmt: on
lowerCAmelCase_ : Any = tokenizer.decode(_UpperCAmelCase , output_char_offsets=_UpperCAmelCase , filter_word_delimiter_token=_UpperCAmelCase)
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys()) , 2)
self.assertTrue('''text''' in outputs)
self.assertTrue('''char_offsets''' in outputs)
self.assertTrue(isinstance(_UpperCAmelCase , _UpperCAmelCase))
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''')) , outputs.text)
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''') , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''])
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''') , [0, 1, 4, 7, 9, 1_1, 1_2, 1_5, 1_6])
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''') , [1, 4, 6, 9, 1_0, 1_2, 1_5, 1_6, 1_7])
def UpperCAmelCase__ ( self : int):
lowerCAmelCase_ : Any = self.get_tokenizer(word_delimiter_token='''|''')
def check_list_tuples_equal(A_ : Optional[int] , A_ : Any):
self.assertTrue(isinstance(_UpperCAmelCase , _UpperCAmelCase))
self.assertTrue(isinstance(outputs_list[0] , _UpperCAmelCase))
# transform list to ModelOutput
lowerCAmelCase_ : Optional[Any] = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]})
self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''])
def recursive_check(A_ : List[Any] , A_ : int):
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
[recursive_check(_UpperCAmelCase , _UpperCAmelCase) for la, la in zip(_UpperCAmelCase , _UpperCAmelCase)]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''])
# fmt: off
lowerCAmelCase_ : List[Any] = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 4, 8, 9_8, 3_2, 3_2, 3_2, 3_2, 4, 3_3, tokenizer.word_delimiter_token_id, 3_2, 3_2, 3_3, 3_4, 3_4],
[2_4, 2_2, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 2_4, 2_2, 2_2, 2_2, 4, 5, 7_7, tokenizer.pad_token_id, 2_2, 2_2, 4, 3_4, 3_4, 3_4, 3_4],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
lowerCAmelCase_ : List[Any] = tokenizer.batch_decode(_UpperCAmelCase , output_char_offsets=_UpperCAmelCase)
lowerCAmelCase_ : List[Any] = [tokenizer.decode(_UpperCAmelCase , output_char_offsets=_UpperCAmelCase) for ids in sample_ids]
check_list_tuples_equal(_UpperCAmelCase , _UpperCAmelCase)
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''')
def UpperCAmelCase__ ( self : List[str]):
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''')
def UpperCAmelCase__ ( self : int):
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''')
def UpperCAmelCase__ ( self : int):
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''')
def UpperCAmelCase__ ( self : List[Any]):
pass
def UpperCAmelCase__ ( self : Tuple):
lowerCAmelCase_ : Optional[Any] = self.get_tokenizers(do_lower_case=_UpperCAmelCase)
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
lowerCAmelCase_ : str = tokenizer.vocab_size
lowerCAmelCase_ : int = len(_UpperCAmelCase)
self.assertNotEqual(_UpperCAmelCase , 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
lowerCAmelCase_ : int = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
lowerCAmelCase_ : str = tokenizer.add_tokens(_UpperCAmelCase)
lowerCAmelCase_ : List[str] = tokenizer.vocab_size
lowerCAmelCase_ : List[str] = len(_UpperCAmelCase)
self.assertNotEqual(_UpperCAmelCase , 0)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , len(_UpperCAmelCase))
self.assertEqual(_UpperCAmelCase , all_size + len(_UpperCAmelCase))
lowerCAmelCase_ : Optional[int] = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=_UpperCAmelCase)
self.assertGreaterEqual(len(_UpperCAmelCase) , 4)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
lowerCAmelCase_ : Union[str, Any] = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
lowerCAmelCase_ : Union[str, Any] = tokenizer.add_special_tokens(_UpperCAmelCase)
lowerCAmelCase_ : Union[str, Any] = tokenizer.vocab_size
lowerCAmelCase_ : Union[str, Any] = len(_UpperCAmelCase)
self.assertNotEqual(_UpperCAmelCase , 0)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , len(_UpperCAmelCase))
self.assertEqual(_UpperCAmelCase , all_size_a + len(_UpperCAmelCase))
lowerCAmelCase_ : str = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=_UpperCAmelCase)
self.assertGreaterEqual(len(_UpperCAmelCase) , 6)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[0] , tokens[1])
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokens[-4])
self.assertEqual(tokens[0] , tokenizer.eos_token_id)
self.assertEqual(tokens[-3] , tokenizer.pad_token_id)
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''')
def UpperCAmelCase__ ( self : List[str]):
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''')
def UpperCAmelCase__ ( self : Optional[Any]):
pass
def UpperCAmelCase__ ( self : Union[str, Any]):
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
lowerCAmelCase_ : int = self.get_tokenizers(fast=_UpperCAmelCase , do_lower_case=_UpperCAmelCase)
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
lowerCAmelCase_ : int = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
lowerCAmelCase_ : Union[str, Any] = tokenizer.convert_tokens_to_string(_UpperCAmelCase)
self.assertIsInstance(output['''text'''] , _UpperCAmelCase)
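# Note: the class is gated by @require_phonemizer above, so these tests run only when
# the `phonemizer` package (typically with an espeak backend) is installed.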
| 171 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = 42
A_ = 42
class _a ( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
A_ = 1
@register_to_config
def __init__( self , _UpperCAmelCase = 2000 , _UpperCAmelCase = 0.1_5 , _UpperCAmelCase = 0.0_1 , _UpperCAmelCase = 1_3_4_8.0 , _UpperCAmelCase = 1e-5 , _UpperCAmelCase = 1 , ) -> Tuple:
# standard deviation of the initial noise distribution
UpperCamelCase_ = sigma_max
# setable values
UpperCamelCase_ = None
self.set_sigmas(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> torch.FloatTensor:
return sample
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None ) -> str:
UpperCamelCase_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
UpperCamelCase_ = torch.linspace(1 , _UpperCAmelCase , _UpperCAmelCase , device=_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None ) -> Any:
UpperCamelCase_ = sigma_min if sigma_min is not None else self.config.sigma_min
UpperCamelCase_ = sigma_max if sigma_max is not None else self.config.sigma_max
UpperCamelCase_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
UpperCamelCase_ = torch.exp(torch.linspace(math.log(_UpperCAmelCase ) , math.log(_UpperCAmelCase ) , _UpperCAmelCase ) )
UpperCamelCase_ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = True , ) -> Union[SdeVeOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
UpperCamelCase_ = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
UpperCamelCase_ = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
UpperCamelCase_ = timesteps.to(self.discrete_sigmas.device )
UpperCamelCase_ = self.discrete_sigmas[timesteps].to(sample.device )
UpperCamelCase_ = self.get_adjacent_sigma(_UpperCAmelCase , _UpperCAmelCase ).to(sample.device )
UpperCamelCase_ = torch.zeros_like(_UpperCAmelCase )
UpperCamelCase_ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
UpperCamelCase_ = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
UpperCamelCase_ = diffusion.unsqueeze(-1 )
UpperCamelCase_ = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
UpperCamelCase_ = randn_tensor(
sample.shape , layout=sample.layout , generator=_UpperCAmelCase , device=sample.device , dtype=sample.dtype )
UpperCamelCase_ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
UpperCamelCase_ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=_UpperCAmelCase , prev_sample_mean=_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = True , ) -> Union[SchedulerOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        # For small batch sizes, the paper suggests "replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
UpperCamelCase_ = randn_tensor(sample.shape , layout=sample.layout , generator=_UpperCAmelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
UpperCamelCase_ = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
UpperCamelCase_ = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
UpperCamelCase_ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
UpperCamelCase_ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
UpperCamelCase_ = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
UpperCamelCase_ = step_size.unsqueeze(-1 )
UpperCamelCase_ = sample + step_size * model_output
UpperCamelCase_ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
UpperCamelCase_ = timesteps.to(original_samples.device )
UpperCamelCase_ = self.discrete_sigmas.to(original_samples.device )[timesteps]
UpperCamelCase_ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(_UpperCAmelCase ) * sigmas[:, None, None, None]
)
UpperCamelCase_ = noise + original_samples
return noisy_samples
def __len__( self ) -> Optional[int]:
return self.config.num_train_timesteps
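# Sketch of the intended predictor-corrector sampling loop (after Song et al.,
# "Score-Based Generative Modeling through Stochastic Differential Equations");
# method names follow the upstream diffusers scheduler this snippet derives from:
#
#     for t in scheduler.timesteps:
#         for _ in range(num_correction_steps):
#             sample = scheduler.step_correct(model(sample, t).sample, sample).prev_sample
#         sample = scheduler.step_pred(model(sample, t).sample, t, sample).prev_sample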
| 23 | 0 |
import argparse
from collections import defaultdict
def lowercase ( __A : Tuple , __A : int , __A : str , __A : Optional[int] , __A : Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case : Optional[Any] = f"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(__lowercase , """r""" ) as f:
snake_case : str = f.readlines()
snake_case : List[Any] = f"""class {class_name}("""
snake_case : Tuple = f"""{4 * " "}def {test_name}("""
snake_case : Any = f"""{8 * " "}{correct_line.split()[0]}"""
snake_case : Tuple = f"""{16 * " "}{correct_line.split()[0]}"""
snake_case : List[Any] = False
snake_case : Tuple = False
snake_case : Optional[Any] = False
snake_case : int = False
snake_case : Optional[int] = 0
snake_case : Optional[Any] = 0
snake_case : Tuple = []
for line in lines:
if line.startswith(__lowercase ):
snake_case : List[Any] = True
elif in_class and line.startswith(__lowercase ):
snake_case : List[str] = True
elif in_class and in_func and (line.startswith(__lowercase ) or line.startswith(__lowercase )):
snake_case : Optional[int] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
snake_case : Optional[Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
snake_case : Optional[Any] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f"""{spaces * " "}{correct_line}""" )
snake_case : Any = False
else:
new_lines.append(__lowercase )
with open(__lowercase , """w""" ) as f:
for line in new_lines:
f.write(__lowercase )
def lowercase ( __A : Optional[Any] , __A : Tuple=None ) -> List[str]:
'''simple docstring'''
if fail is not None:
with open(__lowercase , """r""" ) as f:
snake_case : Optional[int] = {l.strip() for l in f.readlines()}
else:
snake_case : Union[str, Any] = None
with open(__lowercase , """r""" ) as f:
snake_case : Dict = f.readlines()
snake_case : List[str] = defaultdict(__lowercase )
for line in correct_lines:
snake_case , snake_case , snake_case , snake_case : Any = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
if __name__ == "__main__":
__lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
__lowercase : Optional[int] = parser.parse_args()
main(args.correct_filename, args.fail_filename)
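# Input format note: each line of --correct_filename is split on ";" into exactly four
# fields, for example (hypothetical path and names):
#   tests/test_modeling_foo.py;FooModelTester;test_forward;expected_source_line
# and --fail_filename, when given, lists failing tests as "file::class::test" strings.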
| 36 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ : Optional[int] = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Dict = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
snake_case__ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
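# Design note: the _LazyModule shim keeps importing this package cheap; the
# torch-backed modeling classes are imported only when an attribute such as
# PegasusXModel is first accessed, while the TYPE_CHECKING branch keeps static
# analyzers aware of the real symbols.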
| 23 | 0 |
"""A* search demo on a small 2D grid world."""
import numpy as np


class Cell:
    """A single grid cell, tracking its position, parent and A* scores."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        # g: cost from start, h: heuristic to goal, f: g + h
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        """Return the in-bounds neighbours of `cell` (8-connectivity)."""
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """A* search from `start` to `goal`; returns the path as a list of positions."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))

        if current == goal:
            break

        for n in world.get_neighbours(current):
            # skip cells that were already expanded
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            # squared Euclidean distance as the heuristic
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            # skip cells already queued with an equal-or-better score
            if any(c == n and c.f <= n.f for c in _open):
                continue
            _open.append(n)

    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 331 |
import datasets
from .evaluate import evaluate
_CITATION = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    """CUAD metric: wraps the official CUAD v1 evaluation script."""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {
'id': datasets.Value('string' ),
'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
return score
| 23 | 0 |
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple, None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Prepare the plaintext by up-casing it and separating repeated letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
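

if __name__ == "__main__":
    # Illustrative round trip; the key and message here are arbitrary examples.
    key = "monarchy"
    message = "secret message"
    encoded = encode(message, key)
    # prepare_input() inserts an X between the double S, so the round trip
    # returns "SECRETMESXSAGE" rather than the raw input.
    print(encoded)
    print(decode(encoded, key))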
| 326 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 23 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """
    Args:
        predicted_image_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):
            The predicted CLIP image embedding conditioned on the CLIP text embedding input.
    """

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
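

# Minimal smoke test (illustrative; shapes follow the defaults above):
#   model = PriorTransformer()
#   out = model(
#       hidden_states=torch.randn(2, 768),
#       timestep=1,
#       proj_embedding=torch.randn(2, 768),
#       encoder_hidden_states=torch.randn(2, 77, 768),
#   )
#   assert out.predicted_image_embedding.shape == (2, 768)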
| 169 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
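    # Example invocation (paths are illustrative placeholders):
    #   python convert_albert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./albert_base/model.ckpt-best \
    #       --albert_config_file ./albert_base/albert_config.json \
    #       --pytorch_dump_path ./albert_base_pytorch_model.bin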
| 23 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        # Launches this very file under torchrun, so the `__main__` block below
        # runs once per process.
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    # Each torchrun-launched rank builds a tensor whose first dimension depends on its
    # process index, then checks that `pad_across_processes` pads it with zeros to the
    # largest size across ranks.
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 597 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"]
        )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"]
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"]
        )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"]
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer
        )

        # start training
        trainer.train()
| 23 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
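

# Quick sanity check (illustrative):
#   config = LxmertConfig()
#   config.num_hidden_layers  # -> {"vision": 5, "cross_encoder": 5, "language": 9}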
| 197 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer.

    Args:
        accelerator (`Accelerator`):
            An `Accelerator` object
        batch_size (`int`, *optional*):
            The batch size for the train and validation DataLoaders.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt"
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs)
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
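
# Typical launch (illustrative):
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2 --mixed_precision fp16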
| 23 | 0 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    lockfile = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / lockfile))
    # the lock file name is hashed/truncated so it stays a valid path
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(lockfile)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / lockfile)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
| 278 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        # `mask_feature_size` mirrors `hidden_dim` here (assumed from the duplicated assignment)
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        # backbone/decoder sizes below are assumed from the upstream tester
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim)
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_maskaformer_model(self):
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
    def test_maskaformer_instance_segmentation_head_model(self):
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_UpperCAmelCase )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
    def test_model_common_attributes(self):
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
    def test_generate_without_input_ids(self):
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
    def test_resize_tokens_embeddings(self):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
    def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_is_small(self):
pass
    def test_forward_signature(self):
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
@slow
    def test_model_from_pretrained(self):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
UpperCamelCase_ = MaskaFormerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
    def test_model_with_labels(self):
UpperCamelCase_ = (self.model_tester.min_size,) * 2
UpperCamelCase_ = {
'pixel_values': torch.randn((2, 3, *size) , device=_UpperCAmelCase ),
'mask_labels': torch.randn((2, 10, *size) , device=_UpperCAmelCase ),
'class_labels': torch.zeros(2 , 10 , device=_UpperCAmelCase ).long(),
}
UpperCamelCase_ = self.model_tester.get_config()
UpperCamelCase_ = MaskaFormerForUniversalSegmentation(_UpperCAmelCase ).to(_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
    def test_hidden_states_output(self):
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
    def test_attention_outputs(self):
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase , output_attentions=_UpperCAmelCase )
self.assertTrue(outputs.attentions is not None )
    def test_training(self):
if not self.model_tester.is_training:
return
UpperCamelCase_ = self.all_model_classes[1]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ).loss
loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
UpperCamelCase_ = self.all_model_classes[1]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ = True
UpperCamelCase_ = True
UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase )
UpperCamelCase_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_UpperCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4
def prepare_img():
UpperCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
    def model_checkpoints(self):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
    def default_image_processor(self):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
    def test_inference_no_head(self):
UpperCamelCase_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
UpperCamelCase_ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
UpperCamelCase_ = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
UpperCamelCase_ = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
UpperCamelCase_ = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
    def test_inference_universal_segmentation_head(self):
UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval()
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
UpperCamelCase_ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
# masks_queries_logits
UpperCamelCase_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCamelCase_ = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
UpperCamelCase_ = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
# class_queries_logits
UpperCamelCase_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
UpperCamelCase_ = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
    def test_with_segmentation_maps_and_loss(self):
UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval()
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
UpperCamelCase_ = inputs['pixel_values'].to(_UpperCAmelCase )
UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['mask_labels']]
UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['class_labels']]
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
| 23 | 0 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCAmelCase_ : Union[str, Any] = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
'''simple docstring'''
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
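    # Builds a random (config, inputs_dict) pair shaped by the hyperparameters above.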
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = np.concatenate([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
'''simple docstring'''
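        # Decode step by step through a pre-allocated cache, then check the result matches one uncached pass.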
_UpperCamelCase : Tuple = 20
_UpperCamelCase : Any = model_class_name(_UpperCAmelCase )
_UpperCamelCase : Dict = model.encode(inputs_dict["input_ids"] )
        decoder_input_ids, decoder_attention_mask = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_UpperCamelCase : List[Any] = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase )
_UpperCamelCase : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
_UpperCamelCase : Dict = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase : Dict = model.decode(
decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
_UpperCamelCase : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
_UpperCamelCase : Any = model.decode(
decoder_input_ids[:, -1:] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCAmelCase , )
_UpperCamelCase : Any = model.decode(_UpperCAmelCase , _UpperCAmelCase )
_UpperCamelCase : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
'''simple docstring'''
_UpperCamelCase : str = 20
_UpperCamelCase : List[str] = model_class_name(_UpperCAmelCase )
_UpperCamelCase : List[Any] = model.encode(inputs_dict["input_ids"] )
        decoder_input_ids, decoder_attention_mask = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_UpperCamelCase : Union[str, Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_UpperCamelCase : int = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase )
_UpperCamelCase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase : List[str] = model.decode(
decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
_UpperCamelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
_UpperCamelCase : Dict = model.decode(
decoder_input_ids[:, -1:] , _UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
_UpperCamelCase : List[Any] = model.decode(_UpperCAmelCase , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase )
_UpperCamelCase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
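# Helper: assemble the full seq2seq input dict, deriving attention masks from pad tokens when absent.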
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
            ] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = FlaxPegasusModelTester(self )
_UpperCamelCase : int = ConfigTester(self , config_class=_UpperCAmelCase )
def snake_case__ ( self : Any ) ->str:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self : Dict ) ->Dict:
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
def snake_case__ ( self : Union[str, Any] ) ->Tuple:
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
def snake_case__ ( self : int ) ->Dict:
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
@jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )
with self.subTest("JIT Enabled" ):
_UpperCamelCase : List[str] = encode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_UpperCamelCase : Union[str, Any] = encode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case__ ( self : Dict ) ->List[str]:
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
                prepared_inputs_dict = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
with self.subTest("JIT Enabled" ):
_UpperCamelCase : Optional[int] = decode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_UpperCamelCase : Union[str, Any] = decode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def snake_case__ ( self : Dict ) ->int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCamelCase : List[Any] = model_class_name.from_pretrained("google/pegasus-large" , from_pt=_UpperCAmelCase )
_UpperCamelCase : Any = np.ones((1, 1) )
_UpperCamelCase : Dict = model(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@slow
def snake_case__ ( self : Dict ) ->List[Any]:
'''simple docstring'''
_UpperCamelCase : List[str] = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
_UpperCamelCase : Optional[Any] = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
_UpperCamelCase : str = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning \'Oh I think you\'re nominated\'\", said Dappy.\"And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around.\"At the end of the day we\'re grateful to be where we are in our careers.\"If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
_UpperCamelCase : Optional[Any] = [
"California\'s largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.",
]
_UpperCamelCase : Optional[int] = tokenizer(_UpperCAmelCase , return_tensors="np" , truncation=_UpperCAmelCase , max_length=512 , padding=_UpperCAmelCase )
_UpperCamelCase : Optional[int] = model.generate(**_UpperCAmelCase , num_beams=2 ).sequences
_UpperCamelCase : Optional[Any] = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
assert tgt_text == decoded
| 435 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
snake_case__ : List[str] = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig ):
"""simple docstring"""
A_ = """vision-encoder-decoder"""
A_ = True
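    # One `encoder` and one `decoder` sub-config are required; both are materialized via AutoConfig.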
    def __init__( self , **kwargs ) -> None:
        super().__init__(**kwargs )
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"""A configuration of type {self.model_type} cannot be instantiated because """
                f"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
        encoder_config = kwargs.pop('encoder' )
        encoder_model_type = encoder_config.pop('model_type' )
        decoder_config = kwargs.pop('decoder' )
        decoder_model_type = decoder_config.pop('model_type' )
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_composition = True
@classmethod
    def from_encoder_decoder_configs( cls , encoder_config , decoder_config , **kwargs ) -> PretrainedConfig:
        logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig ):
"""simple docstring"""
    torch_onnx_minimum_version = version.parse("""1.11""" )
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _UpperCAmelCase ( self ) -> float:
return 1e-4
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} )
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig ):
"""simple docstring"""
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs['input_ids'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs['attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs['encoder_hidden_states'] = {0: 'batch', 1: 'encoder_sequence'}
        return common_inputs
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        import torch
        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        batch, encoder_sequence = dummy_input['input_ids'].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs['input_ids'] = dummy_input.pop('input_ids' )
        common_inputs['attention_mask'] = dummy_input.pop('attention_mask' )
        common_inputs['encoder_hidden_states'] = torch.zeros(encoder_hidden_states_shape )
        return common_inputs
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
@property
def _UpperCAmelCase ( self ) -> None:
pass
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> OnnxConfig:
return VisionEncoderDecoderEncoderOnnxConfig(_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = "default" ) -> OnnxConfig:
UpperCamelCase_ = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_UpperCAmelCase , _UpperCAmelCase )
| 23 | 0 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__):
@slow
@require_torch
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
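        # End-to-end smoke test: fine-tune a tiny BERT2BERT summarizer on 1% slices of CNN/DailyMail.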
UpperCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
UpperCamelCase = BertTokenizer.from_pretrained('bert-base-uncased' )
UpperCamelCase = bertabert.config.encoder.vocab_size
UpperCamelCase = tokenizer.sep_token_id
UpperCamelCase = tokenizer.cls_token_id
UpperCamelCase = 128
UpperCamelCase = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
UpperCamelCase = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
UpperCamelCase = train_dataset.select(range(32 ) )
UpperCamelCase = val_dataset.select(range(16 ) )
UpperCamelCase = 4
def _map_to_encoder_decoder_inputs(A_ ):
# Tokenizer will automatically set [BOS] <text> [EOS]
UpperCamelCase = tokenizer(batch['article'] , padding='max_length' , truncation=_UpperCAmelCase , max_length=512 )
UpperCamelCase = tokenizer(batch['highlights'] , padding='max_length' , truncation=_UpperCAmelCase , max_length=128 )
UpperCamelCase = inputs.input_ids
UpperCamelCase = inputs.attention_mask
UpperCamelCase = outputs.input_ids
UpperCamelCase = outputs.input_ids.copy()
UpperCamelCase = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
UpperCamelCase = outputs.attention_mask
assert all(len(_UpperCAmelCase ) == 512 for x in inputs.input_ids )
assert all(len(_UpperCAmelCase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(A_ ):
UpperCamelCase = pred.label_ids
UpperCamelCase = pred.predictions
# all unnecessary tokens are removed
UpperCamelCase = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCamelCase = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_UpperCAmelCase ) )] ) / len(_UpperCAmelCase )
return {"accuracy": accuracy}
# map train dataset
UpperCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=['article', 'highlights'] , )
train_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
# same for validation dataset
UpperCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=['article', 'highlights'] , )
val_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = SeqaSeqTrainingArguments(
output_dir=_UpperCAmelCase , per_device_train_batch_size=_UpperCAmelCase , per_device_eval_batch_size=_UpperCAmelCase , predict_with_generate=_UpperCAmelCase , evaluation_strategy='steps' , do_train=_UpperCAmelCase , do_eval=_UpperCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
UpperCamelCase = SeqaSeqTrainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , tokenizer=_UpperCAmelCase , )
# start training
trainer.train()
| 3 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
snake_case__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
snake_case__ : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
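# Example invocation (paths are illustrative placeholders; the script filename is assumed):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/mobilebert/model.ckpt \
#       --mobilebert_config_file /path/to/mobilebert/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin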
| 23 | 0 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
a : Optional[Any] = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list )
    handles: list = field(default_factory=list )
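    # Forward hook that records each "leaf" layer (convs, batch norms) the model actually executes.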
    def _forward_hook( self , m , inputs , outputs ):
        """simple docstring"""
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
    def __call__( self , x ):
        """simple docstring"""
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(x )
        [x.remove() for x in self.handles]
        return self
@property
    def parametrized( self ):
"""simple docstring"""
        return list(filter(lambda x: len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list )
    dest_skip: List = field(default_factory=list )
    raise_if_mismatch: bool = True
    def __call__( self , x ):
        """simple docstring"""
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m: type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m: type(m ) not in self.dest_skip , dest_traced ) )
        if len(dest_traced ) != len(src_traced ) and self.raise_if_mismatch:
            raise Exception(
                f'''Numbers of operations are different. Source module has {len(src_traced )} operations while'''
                f''' destination module has {len(dest_traced )}.''' )
        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f'''Transferred from={src_m} to={dest_m}''' )
class FakeRegNetVisslWrapper(nn.Module ):
    def __init__( self , model ):
        """simple docstring"""
        super().__init__()
        feature_blocks = []
# - get the stem
feature_blocks.append(("conv1", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("block" ), f'''Unexpected layer name {k}'''
            block_index = len(feature_blocks ) + 1
feature_blocks.append((f'''res{block_index}''', v) )
        self._feature_blocks = nn.ModuleDict(feature_blocks )
    def forward( self , x ):
        """simple docstring"""
        return get_trunk_forward_outputs(
            x , out_feat_keys=None , feature_blocks=self._feature_blocks , )
class NameToFromModelFuncMap(dict ):
    def convert_name_to_timm( self , x ):
        """simple docstring"""
        x_split = x.split("-" )
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
    def __getitem__( self , x ):
        """simple docstring"""
        if x not in self:
            x = self.convert_name_to_timm(x )
            val = partial(lambda: (timm.create_model(x , pretrained=True ).eval(), None) )
        else:
            val = super().__getitem__(x )
        return val
class NameToOurModelFuncMap(dict ):
    def __getitem__( self , x ):
        """simple docstring"""
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
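# Copies classification-head tensors from the vissl checkpoint into our model's state dict.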
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(F'''Copied key={from_key} to={to_key}''' )
    return to_state_dict
def convert_weight_and_push(name, from_model_func, our_model_func, config, save_directory, push_to_hub=True):
print(F'''Converting {name}...''' )
with torch.no_grad():
__snake_case , __snake_case = from_model_func()
__snake_case = our_model_func(__lowercase ).eval()
__snake_case = ModuleTransfer(src=__lowercase , dest=__lowercase , raise_if_mismatch=__lowercase )
__snake_case = torch.randn((1, 3, 2_24, 2_24) )
module_transfer(__lowercase )
if from_state_dict is not None:
__snake_case = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
__snake_case = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
__snake_case = manually_copy_vissl_head(__lowercase , our_model.state_dict() , __lowercase )
our_model.load_state_dict(__lowercase )
__snake_case = our_model(__lowercase , output_hidden_states=__lowercase )
__snake_case = (
our_outputs.logits if isinstance(__lowercase , __lowercase ) else our_outputs.last_hidden_state
)
__snake_case = from_model(__lowercase )
__snake_case = from_output[-1] if type(__lowercase ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
__snake_case = our_outputs.hidden_states[-1]
assert torch.allclose(__lowercase , __lowercase ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=__lowercase , )
__snake_case = 2_24 if "seer" not in name else 3_84
# we can use the convnext one
__snake_case = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=__lowercase )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=__lowercase , )
print(F'''Pushed {name}''' )
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
__snake_case = "imagenet-1k-id2label.json"
__snake_case = 10_00
__snake_case = (1, num_labels)
__snake_case = "huggingface/label-files"
__snake_case = num_labels
__snake_case = json.load(open(cached_download(hf_hub_url(__lowercase , __lowercase , repo_type="dataset" ) ) , "r" ) )
__snake_case = {int(__lowercase ): v for k, v in idalabel.items()}
__snake_case = idalabel
__snake_case = {v: k for k, v in idalabel.items()}
__snake_case = partial(__lowercase , num_labels=__lowercase , idalabel=__lowercase , labelaid=__lowercase )
__snake_case = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 1_60, 3_84] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 2_40, 5_28] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 1_28, 2_88, 6_72] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 1_68, 4_08, 9_12] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 1_92, 4_32, 10_08] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 2_40, 5_60, 13_60] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 3_92, 7_84, 16_24] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 2_40, 7_20, 19_20] , groups_width=1_20 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[2_56, 5_12, 8_96, 20_48] , groups_width=1_28 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[3_36, 6_72, 13_44, 25_20] , groups_width=1_68 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 1_04, 2_08, 4_40] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 1_12, 2_56, 6_08] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 1_28, 3_20, 7_68] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 1_20, 3_36, 8_88] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 2_16, 5_76, 15_12] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[1_28, 1_92, 5_12, 10_88] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[1_44, 2_88, 5_76, 12_96] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 4_48, 8_96, 20_16] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[2_24, 4_48, 12_32, 30_24] , groups_width=1_12 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
}
__snake_case = NameToOurModelFuncMap()
__snake_case = NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(checkpoint_url, model_func) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url , model_dir=str(save_directory ) , map_location="cpu" )
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict )
        return model.eval(), model_state_dict["heads"]
# pretrained
__snake_case = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__snake_case = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__snake_case = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
__snake_case = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=10_10 , w_0=17_44 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
__snake_case = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__snake_case = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__snake_case = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
__snake_case = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=10_10 , w_0=17_44 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
__lowercase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , __lowercase , __lowercase , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
__lowercase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , __lowercase , __lowercase , __lowercase , )
return config, expected_shape
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
a : Dict = parser.parse_args()
a : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 69 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _a ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
def _UpperCAmelCase ( self ) -> List[str]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' )
UpperCamelCase_ = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is grouped', 'score': 2.1e-05, 'token': 38015, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1e-05, 'token': 25506, 'token_str': ' accuser'},
] , )
UpperCamelCase_ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1e-05,
'token': 38015,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1e-05,
'token': 25506,
'token_str': ' accuser',
},
] , )
UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2e-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9e-05, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' )
UpperCamelCase_ = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is Maul', 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'},
] , )
UpperCamelCase_ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'},
] , )
UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is Patrick', 'score': 2.1e-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2e-05, 'token': 2941, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'},
] , )
UpperCamelCase_ = unmasker('My name is <mask> <mask>' , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
[
{
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' )
# convert model to fp16
pipe.model.half()
UpperCamelCase_ = pipe('Paris is the [MASK] of France.' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
@slow
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' )
self.run_large_test(_UpperCAmelCase )
@slow
@require_tf
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' )
self.run_large_test(_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Tuple:
UpperCamelCase_ = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , [
{'sequence': 'My name is John', 'score': 0.0_0_8, 'token': 610, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.0_0_7, 'token': 1573, 'token_str': ' Chris'},
] , )
UpperCamelCase_ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , [
{
'sequence': 'The largest city in France is Paris',
'score': 0.2_5_1,
'token': 2201,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.2_1_4,
'token': 12790,
'token_str': ' Lyon',
},
] , )
UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , [
{'sequence': 'My name is Patrick', 'score': 0.0_0_5, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.0_0_0, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.0_0_0, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' )
UpperCamelCase_ = None
UpperCamelCase_ = None
self.run_pipeline_test(_UpperCAmelCase , [] )
@require_tf
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' )
UpperCamelCase_ = None
UpperCamelCase_ = None
self.run_pipeline_test(_UpperCAmelCase , [] )
    def get_test_pipeline( self , model , tokenizer , processor ):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' )
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        examples = [
            f"""This is another {tokenizer.mask_token} test""",
        ]
        return fill_masker, examples
    def run_pipeline_test( self , fill_masker , examples ):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
UpperCamelCase_ = fill_masker(
f"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
_UpperCAmelCase , [
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
] , )
with self.assertRaises(_UpperCAmelCase ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_UpperCAmelCase ):
fill_masker('This is' )
        self.run_test_top_k(model , tokenizer )
        self.run_test_targets(model , tokenizer )
        self.run_test_top_k_targets(model , tokenizer )
        self.fill_mask_with_duplicate_targets_and_top_k(model , tokenizer )
        self.fill_mask_with_multiple_masks(model , tokenizer )
    def run_test_targets( self , model , tokenizer ):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys() )[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer , targets=targets )
        outputs = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
        target_ids = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase )
UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) )
# Call argument
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=targets )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
        target_ids = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase )
UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) )
# Score equivalence
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase )
UpperCamelCase_ = [top_mask['token_str'] for top_mask in outputs]
UpperCamelCase_ = [top_mask['score'] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_UpperCAmelCase ) == set(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase )
UpperCamelCase_ = [top_mask['score'] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )
# Raises with invalid
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[''] )
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets='' )
    def run_test_top_k( self , model , tokenizer ):
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer , top_k=2 )
        outputs = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs2 = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
        self.assertEqual(nested_simplify(outputs ) , nested_simplify(outputs2 ) )
    def run_test_top_k_targets( self , model , tokenizer ):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        # top_k=2, ntargets=3
        targets = sorted(vocab.keys() )[:3]
        outputs = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=targets )
# If we use the most probably targets, and filter differently, we should still
# have the same results
        targets2 = [el['token_str'] for el in sorted(outputs , key=lambda x: x["score"] , reverse=True )]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2 ).issubset(vocab ):
            outputs2 = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=targets2 )
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs ) , nested_simplify(outputs2 ) )
    def fill_mask_with_duplicate_targets_and_top_k( self , model , tokenizer ):
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys() )[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=targets , top_k=10 )
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs ) , 3 )
    def fill_mask_with_multiple_masks( self , model , tokenizer ):
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs = fill_masker(
            f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
        self.assertEqual(
            outputs , [
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
] , )
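    # A standalone sketch of the behaviour exercised above (illustrative and
    # commented out: it needs network access, and the checkpoint name is an
    # assumption; any fill-mask model works):
    #
    # from transformers import pipeline
    # fill = pipeline("fill-mask", model="distilroberta-base")
    # # `top_k` bounds the number of predictions; `targets` restricts candidates.
    # print(fill(f"Paris is the {fill.tokenizer.mask_token} of France.", top_k=2))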
| 23 | 0 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    # Normalize each embedding matrix row-wise, then take pairwise cosine similarity.
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)
        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim))
        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)
        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(self, config, input_shape=None, seed=0, dtype=jnp.float32, _do_init=True, **kwargs):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng, input_shape, params=None):
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params

    def __call__(self, clip_input, params=None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
        return self.module.apply(
            {"params": params or self.params}, jnp.array(clip_input, dtype=jnp.float32), rngs={})
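# A quick standalone check of the row-wise cosine similarity computed by
# jax_cosine_distance above (made-up inputs, illustrative only):
#
# a = jnp.array([[1.0, 0.0], [0.0, 2.0]])
# b = jnp.array([[1.0, 1.0]])
# print(jax_cosine_distance(a, b))  # [[~0.7071], [~0.7071]]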
| 171 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np", )
        image = output.images
        assert image.shape == (1, 512, 768, 3)
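    # The seeding pattern used by get_dummy_inputs above, as a standalone helper
    # (a sketch; MPS historically lacks per-device generators, hence the fallback):
    #
    # def make_generator(device: str, seed: int = 0) -> torch.Generator:
    #     if str(device).startswith("mps"):
    #         return torch.manual_seed(seed)
    #     return torch.Generator(device=device).manual_seed(seed)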
| 23 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False
    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words
    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
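    # Standalone illustration of the padding-mask construction performed by
    # prepare_mbart_inputs_dict above (illustrative values):
    #
    # pad_token_id = 1
    # input_ids = tf.constant([[5, 6, 1, 1]])
    # attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), tf.int8)
    # # -> [[1, 1, 0, 0]]: real tokens attend, padding positions are masked out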
| 36 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")
def get_parent_position(position: int) -> int:
    return (position - 1) // 2
def get_child_left_position(position: int) -> int:
    return (2 * position) + 1
def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Min-heap-based priority queue with a position map for key updates."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(curr_pos, parent_position)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as nested adjacency dictionaries."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
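if __name__ == "__main__":
    # Example usage (illustrative): build a small weighted triangle and run
    # Prim's algorithm; the resulting MST keeps edges a-b (3) and a-c (5).
    g: GraphUndirectedWeighted[str] = GraphUndirectedWeighted()
    g.add_edge("a", "b", 3)
    g.add_edge("b", "c", 10)
    g.add_edge("c", "a", 5)
    dist, parent = prims_algo(g)
    print(dist)  # {'a': 0, 'b': 3, 'c': 5}
    print(parent)  # {'a': None, 'b': 'a', 'c': 'a'}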
| 23 | 0 |
'''simple docstring'''
def hamming(n_element):
    """Return the first n_element Hamming (5-smooth) numbers."""
    n_element = int(n_element)
    if n_element < 1:
        raise ValueError("n_element should be a positive number")
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list
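# Example (the first ten 5-smooth numbers):
#     >>> hamming(10)
#     [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]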
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
print("""-----------------------------------------------------""")
print(F"The list with nth numbers is: {hamming_numbers}")
print("""-----------------------------------------------------""")
| 331 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")
class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer("""A""")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("""A""")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
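# Trace of the demo above: refer("A") -> ['A']; refer(2) -> [2, 'A'];
# refer(3) -> [3, 2, 'A']; refer("A") moves it to the front -> ['A', 3, 2];
# refer(4) -> [4, 'A', 3, 2]; refer(5) evicts the least-recently-used key 2,
# giving [5, 4, 'A', 3], which is exactly what the assert checks.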
| 23 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
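# Illustrative usage (commented out; requires network access to fetch the
# tokenizer files):
#
# tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
# print(tokenizer("hello world").input_ids)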
| 326 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))
def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    # SiLU / swish: x * sigmoid(x)
    return vector * sigmoid(vector)
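# Examples:
#     >>> sigmoid(np.array([0.0]))
#     array([0.5])
#     >>> sigmoid_linear_unit(np.array([0.0]))
#     array([0.])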
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 169 |
import math
from datetime import datetime, timedelta
def gauss_easter(year):
    """Compute the date of Easter for the given year via Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
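# Example: Western Easter 2023 fell on April 9.
#     >>> gauss_easter(2023)
#     datetime.datetime(2023, 4, 9, 0, 0)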
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
    tense = "will be" if year > datetime.now().year else "was"
print(f'Easter in {year} {tense} {gauss_easter(year)}')
| 23 | 0 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        # color vertex v with c and recurse into uncolored neighbours with 1 - c
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
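# Example: a triangle (odd cycle) is not bipartite.
#     >>> check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]})
#     False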
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 597 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 23 | 0 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # ctypes struct layout expected by the Windows console cursor API
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
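# Illustrative usage: hide the cursor while rendering terminal output.
#
# with hide():
#     print("cursor is hidden while this block runs")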
| 197 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Dict:
with open(_UpperCAmelCase , encoding='utf-8' ) as input_file:
UpperCamelCase_ = re.compile(R'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)' )
UpperCamelCase_ = input_file.read()
UpperCamelCase_ = regexp.search(_UpperCAmelCase )
return match
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Dict:
with open(_UpperCAmelCase , encoding='utf-8' ) as input_file:
UpperCamelCase_ = re.compile(R'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL )
UpperCamelCase_ = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
UpperCamelCase_ = regexp.finditer(_UpperCAmelCase )
UpperCamelCase_ = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def _UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase_ = Path('./datasets' )
UpperCamelCase_ = list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(_UpperCAmelCase ) ):
raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = Path('./datasets' )
UpperCamelCase_ = list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_print_statements(str(_UpperCAmelCase ) ):
raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
| 23 | 0 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]
def __init__( self : Tuple , __a : str , __a : Dict , __a : Union[str, Any] , __a : Optional[Any] = None , __a : List[str]=False , __a : Optional[int] = False , ) ->Optional[Any]:
lowerCamelCase_ : List[str] = hans_processors[task]()
lowerCamelCase_ : List[Any] = os.path.join(
_UpperCAmelCase , """cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(_UpperCAmelCase ) , _UpperCAmelCase , ) , )
lowerCamelCase_ : Tuple = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase_, lowerCamelCase_ : str = label_list[2], label_list[1]
lowerCamelCase_ : Union[str, Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ : Tuple = cached_features_file + """.lock"""
with FileLock(_UpperCAmelCase ):
if os.path.exists(_UpperCAmelCase ) and not overwrite_cache:
logger.info(F'''Loading features from cached file {cached_features_file}''' )
lowerCamelCase_ : List[str] = torch.load(_UpperCAmelCase )
else:
logger.info(F'''Creating features from dataset file at {data_dir}''' )
lowerCamelCase_ : str = (
processor.get_dev_examples(_UpperCAmelCase ) if evaluate else processor.get_train_examples(_UpperCAmelCase )
)
logger.info("""Training examples: %s""" , len(_UpperCAmelCase ) )
lowerCamelCase_ : Optional[Any] = hans_convert_examples_to_features(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
logger.info("""Saving features into cached file %s""" , _UpperCAmelCase )
torch.save(self.features , _UpperCAmelCase )
def __len__( self : Tuple ) ->int:
return len(self.features )
def __getitem__( self : Optional[int] , __a : List[Any] ) ->InputFeatures:
return self.features[i]
def _lowerCAmelCase ( self : int ) ->List[Any]:
return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]
def __init__( self : Union[str, Any] , __a : List[Any] , __a : Tuple , __a : Optional[int] , __a : Union[str, Any] = 128 , __a : List[str]=False , __a : Union[str, Any] = False , ) ->Dict:
lowerCamelCase_ : Optional[int] = hans_processors[task]()
lowerCamelCase_ : List[str] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase_, lowerCamelCase_ : Tuple = label_list[2], label_list[1]
lowerCamelCase_ : Optional[Any] = label_list
lowerCamelCase_ : Optional[int] = processor.get_dev_examples(_UpperCAmelCase ) if evaluate else processor.get_train_examples(_UpperCAmelCase )
lowerCamelCase_ : int = hans_convert_examples_to_features(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ):
if ex_index % 10_000 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(_UpperCAmelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
lowerCamelCase_ : str = tf.data.Dataset.from_generator(
_UpperCAmelCase , (
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) , (
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _lowerCAmelCase ( self : int ) ->List[Any]:
return self.dataset
def __len__( self : Any ) ->str:
return len(self.features )
def __getitem__( self : str , __a : Optional[int] ) ->InputFeatures:
return self.features[i]
def _lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""
def _lowerCAmelCase ( self : List[str] , __a : Optional[int] ) ->Optional[Any]:
return self._create_examples(self._read_tsv(os.path.join(_UpperCAmelCase , """heuristics_train_set.txt""" ) ) , """train""" )
def _lowerCAmelCase ( self : int , __a : Any ) ->List[str]:
return self._create_examples(self._read_tsv(os.path.join(_UpperCAmelCase , """heuristics_evaluation_set.txt""" ) ) , """dev""" )
def _lowerCAmelCase ( self : Dict ) ->List[str]:
return ["contradiction", "entailment", "neutral"]
def _lowerCAmelCase ( self : str , __a : List[Any] , __a : str ) ->Dict:
lowerCamelCase_ : int = []
for i, line in enumerate(_UpperCAmelCase ):
if i == 0:
continue
lowerCamelCase_ : Dict = """%s-%s""" % (set_type, line[0])
lowerCamelCase_ : Union[str, Any] = line[5]
lowerCamelCase_ : Tuple = line[6]
lowerCamelCase_ : List[Any] = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
lowerCamelCase_ : int = line[0]
examples.append(InputExample(guid=_UpperCAmelCase , text_a=_UpperCAmelCase , text_b=_UpperCAmelCase , label=_UpperCAmelCase , pairID=_UpperCAmelCase ) )
return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer):
lowerCamelCase_ : Any = {label: i for i, label in enumerate(__lowercase )}
lowerCamelCase_ : Union[str, Any] = []
for ex_index, example in tqdm.tqdm(enumerate(__lowercase ) , desc="""convert examples to features""" ):
if ex_index % 1_0000 == 0:
logger.info("""Writing example %d""" % (ex_index) )
lowerCamelCase_ : Optional[Any] = tokenizer(
example.text_a , example.text_b , add_special_tokens=__lowercase , max_length=__lowercase , padding="""max_length""" , truncation=__lowercase , return_overflowing_tokens=__lowercase , )
lowerCamelCase_ : Tuple = label_map[example.label] if example.label in label_map else 0
lowerCamelCase_ : str = int(example.pairID )
features.append(InputFeatures(**__lowercase , label=__lowercase , pairID=__lowercase ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(f'''guid: {example}''' )
logger.info(f'''features: {features[i]}''' )
return features
hans_tasks_num_labels = {
    "hans": 3,
}
hans_processors = {
    "hans": HansProcessor,
}
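# Minimal usage sketch (illustrative and commented out; it assumes the HANS txt
# files exist under the data directory, and argument order follows the upstream
# HansDataset signature: data_dir, tokenizer, task, max_seq_length):
#
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# dataset = HansDataset("hans_data/", tokenizer, "hans", 128, evaluate=True)
# print(len(dataset), dataset.get_labels())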
| 278 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs(context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl"):
set_seed(3)
# generate train_data and objective_set
UpperCamelCase_ , UpperCamelCase_ = generate_datasets(
__lowercase , __lowercase , number=__lowercase , min_len=1026 , trim=__lowercase)
# keeps model same across runs
set_seed(4)
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
UpperCamelCase_ = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# load pretrained model
UpperCamelCase_ = load_gpta('gpt2').to(__lowercase)
print('computing perplexity on objective set')
UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase).item()
print('perplexity on objective set:' , __lowercase)
# collect igf pairs and save to file demo.jbl
collect_objective_set(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def training_secondary_learner(secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt"):
set_seed(42)
# Load pre-trained model
UpperCamelCase_ = GPTaLMHeadModel.from_pretrained('gpt2')
# Initialize secondary learner to use embedding weights of model
UpperCamelCase_ = SecondaryLearner(__lowercase)
# Train secondary learner
UpperCamelCase_ = train_secondary_learner(
__lowercase , __lowercase , max_epochs=__lowercase , batch_size=__lowercase , eval_freq=100 , igf_model_path=__lowercase , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def finetune(model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpta, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt"):
UpperCamelCase_ = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
UpperCamelCase_ = RandomSampler(__lowercase)
UpperCamelCase_ = DataLoader(__lowercase , sampler=__lowercase)
UpperCamelCase_ = max_steps // (len(__lowercase)) + 1
UpperCamelCase_ = 0
UpperCamelCase_ = torch.zeros((1, context_len) , dtype=torch.long , device=__lowercase)
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = recopy_model(__lowercase , __lowercase , __lowercase)
model.train()
if secondary_learner is not None:
secondary_learner.to(__lowercase)
secondary_learner.eval()
UpperCamelCase_ = []
UpperCamelCase_ = 0
UpperCamelCase_ = []
UpperCamelCase_ = []
# Compute the performance of the transformer model at the beginning
UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase)
test_perps.append(__lowercase)
print('Test perplexity, step' , __lowercase , ':' , __lowercase)
for epoch in range(int(__lowercase)):
for step, example in enumerate(__lowercase):
torch.cuda.empty_cache()
UpperCamelCase_ = random.randint(0 , example.size(2) - context_len - 1)
UpperCamelCase_ = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
UpperCamelCase_ = model(__lowercase , labels=__lowercase)
UpperCamelCase_ = True
if secondary_learner is not None:
UpperCamelCase_ = secondary_learner.forward(
torch.tensor(__lowercase , dtype=torch.long , device=__lowercase).unsqueeze(0))[0].item()
observed_qs.append(float(__lowercase))
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
UpperCamelCase_ = -1
if predicted_q < threshold:
UpperCamelCase_ = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu()))
UpperCamelCase_ = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
UpperCamelCase_ = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0)
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase)
test_perps.append(__lowercase)
print('Test perplexity, step' , __lowercase , ':' , __lowercase)
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , __lowercase)
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def main():
    parser = argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task')
# Required parameters
parser.add_argument(
'--data_dir' , default=__lowercase , type=__lowercase , required=__lowercase , help='The input data dir. Should contain data files for WikiText.' , )
parser.add_argument(
'--model_name_or_path' , default=__lowercase , type=__lowercase , required=__lowercase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--data_file' , type=__lowercase , default=__lowercase , help=(
'A jbl file containing tokenized data which can be split as objective dataset, '
'train_dataset and test_dataset.'
) , )
parser.add_argument(
'--igf_data_file' , type=__lowercase , default=__lowercase , help='A jbl file containing the context and information gain pairs to train secondary learner.' , )
parser.add_argument(
'--output_dir' , default=__lowercase , type=__lowercase , required=__lowercase , help='The output directory where the final fine-tuned model is stored.' , )
parser.add_argument(
'--tokenizer_name' , default=__lowercase , type=__lowercase , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument('--seed' , type=__lowercase , default=__lowercase , help='A seed for reproducible training.')
parser.add_argument(
'--context_len' , default=32 , type=__lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--size_objective_set' , default=100 , type=__lowercase , help='number of articles that are long enough to be used as our objective set' , )
parser.add_argument(
'--eval_freq' , default=100 , type=__lowercase , help='secondary model evaluation is triggered at eval_freq')
parser.add_argument('--max_steps' , default=1000 , type=__lowercase , help='To calculate training epochs')
parser.add_argument(
'--secondary_learner_batch_size' , default=128 , type=__lowercase , help='batch size of training data for secondary learner' , )
parser.add_argument(
'--batch_size' , default=16 , type=__lowercase , help='batch size of training data of language model(gpt2) ')
parser.add_argument(
'--eval_interval' , default=10 , type=__lowercase , help=(
'decay the selectivity of our secondary learner filter from'
'1 standard deviation above average to 1 below average after 10 batches'
) , )
parser.add_argument(
'--number' , default=100 , type=__lowercase , help='The number of examples split to be used as objective_set/test_data')
parser.add_argument(
'--min_len' , default=1026 , type=__lowercase , help='The minimum length of the article to be used as objective set')
parser.add_argument(
'--secondary_learner_max_epochs' , default=15 , type=__lowercase , help='number of epochs to train secondary learner')
parser.add_argument('--trim' , default=__lowercase , type=__lowercase , help='truncate the example if it exceeds context length')
parser.add_argument(
'--threshold' , default=1.0 , type=__lowercase , help=(
'The threshold value used by secondary learner to filter the train_data and allow only'
' informative data as input to the model'
) , )
parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=__lowercase , help='finetuned_model_name')
parser.add_argument(
'--recopy_model' , default=__lowercase , type=__lowercase , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=__lowercase , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , )
# Load train data for secondary learner
UpperCamelCase_ = joblib.load('data/IGF_values.jbl')
# Train secondary learner
UpperCamelCase_ = training_secondary_learner(
__lowercase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='igf_model.pt' , )
# load pretrained gpt2 model
UpperCamelCase_ = GPTaLMHeadModel.from_pretrained('gpt2')
set_seed(42)
# Generate train and test data to train and evaluate gpt2 model
UpperCamelCase_ , UpperCamelCase_ = generate_datasets(
context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=100 , min_len=1026 , trim=__lowercase)
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
__lowercase , __lowercase , __lowercase , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=__lowercase , secondary_learner=__lowercase , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , )
if __name__ == "__main__":
main()
| 23 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node
def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root
class AVLtree:
    """A self-balancing binary search tree (an AVL tree)."""

    def __init__(self):
        self.root = None

    def get_height(self):
        return get_height(self.root)

    def insert(self, data):
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data):
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self):  # a level traversal, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test():
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
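# --- Editor's note: a hypothetical sanity check, not part of the original file ---
# An AVL tree must keep every node's two subtrees within one unit of height of
# each other, on top of the usual BST ordering (duplicates go right here).
def assert_avl_invariant(node, lo=None, hi=None):
    """Return the height of ``node`` while asserting order and balance."""
    if node is None:
        return 0
    data = node.get_data()
    assert lo is None or data >= lo, "BST order violated"
    assert hi is None or data < hi, "BST order violated"
    left_height = assert_avl_invariant(node.get_left(), lo, data)
    right_height = assert_avl_invariant(node.get_right(), data, hi)
    assert abs(left_height - right_height) <= 1, "AVL balance violated"
    return max(left_height, right_height) + 1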
| 435 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
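# Editor's note: the @slow integration test above is skipped by default; in the
# transformers test suite such tests are enabled via the RUN_SLOW environment
# variable, e.g. `RUN_SLOW=1 python -m pytest <path to this test file> -k en_ro`
# (invocation shown as an assumption; adapt the path to your checkout).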
| 23 | 0 |
import math


def sieve(n):
    """Segmented sieve of Eratosthenes: return all primes <= n."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    # Classic sieve on the first segment [2, sqrt(n)].
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    # Mark composites in each remaining segment [low, high] using the small primes.
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)
    return prime


print(sieve(10**6))
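# --- Editor's note: a hypothetical spot check, not part of the original file ---
# The segmented sieve should agree with a direct sieve of Eratosthenes, so
# comparing the two on a small bound is a cheap way to validate the segment logic.
def _naive_sieve(n):
    flags = [True] * (n + 1)
    flags[0] = flags[1] = False
    for p in range(2, int(n**0.5) + 1):
        if flags[p]:
            for multiple in range(p * p, n + 1, p):
                flags[multiple] = False
    return [i for i, is_prime in enumerate(flags) if is_prime]


assert sieve(1000) == _naive_sieve(1000)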
| 3 |
def factorial(num):
    """Return num! computed iteratively."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number):
    """Return the sum of the decimal digits of ``number``."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num=100):
    """Sum of the digits of num! (default 100!, Project Euler style)."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
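# --- Editor's note: a small check, not part of the original file ---
# Worked example from the problem statement: 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
assert solution(10) == 27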
| 23 | 0 |
from statistics import mean, stdev


def normalization(data, ndigits=3):
    """Min-max scale ``data`` into [0, 1], rounded to ``ndigits``."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data, ndigits=3):
    """Z-score ``data`` (mean 0, unit sample stdev), rounded to ``ndigits``."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
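# --- Editor's note: hypothetical usage, not part of the original file ---
# Min-max scaling maps the smallest value to 0.0 and the largest to 1.0, while
# standardization recenters to mean 0 with unit *sample* standard deviation.
if __name__ == "__main__":
    data = [2.0, 4.0, 6.0, 8.0]
    print(normalization(data))    # [0.0, 0.333, 0.667, 1.0]
    print(standardization(data))  # [-1.162, -0.387, 0.387, 1.162]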
| 69 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    # A single image -> [[image]]; a list of frames -> [frames]; a batch of
    # videos is returned unchanged.
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
# NOTE: the class name below was restored by the editor; the defaults (224px
# shortest edge, ImageNet-standard mean/std) match VideoMAE's video processor.
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        # Parenthesized to fix the original `and`/`or` precedence bug.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
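# --- Editor's note: hypothetical usage, not part of the original file ---
# (The class name above was restored by the editor; requires the vision extras.)
# Frames go in as a list per video; the processor resizes to the shortest edge,
# center-crops, rescales to [0, 1] and normalizes each frame.
#
#   import numpy as np
#   processor = VideoMAEImageProcessor()
#   video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(16)]
#   inputs = processor(video, return_tensors="np")
#   print(inputs["pixel_values"].shape)  # expected: (1, 16, 3, 224, 224)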
| 23 | 0 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    # heap index of the parent of the node at `position`
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    # heap index of the left child
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    # heap index of the right child
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Minimum priority queue keyed by integer weights, with position tracking."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as nested adjacency dictionaries."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
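# --- Editor's note: hypothetical usage, not part of the original file ---
# Note that the relaxation above uses dist[node] + edge weight, i.e. a
# Dijkstra-style shortest-path-tree update rather than the textbook Prim update
# (which would compare against the edge weight alone).
if __name__ == "__main__":
    graph = GraphUndirectedWeighted[int]()
    graph.add_edge(1, 2, 3)
    graph.add_edge(2, 3, 10)
    graph.add_edge(1, 3, 11)
    dist, parent = prims_algo(graph)
    print(dist)    # {1: 0, 2: 3, 3: 11}
    print(parent)  # {1: None, 2: 1, 3: 1}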
| 171 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output of the SDE-VE scheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """The variance exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # The SDE-VE scheduler does not scale model inputs.
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )
    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
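# --- Editor's note: hypothetical predictor-corrector loop, not in the original ---
# A sketch of how a pipeline might drive this scheduler (names assumed):
#
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps)
#   scheduler.set_sigmas(num_inference_steps)
#   for t in scheduler.timesteps:
#       for _ in range(scheduler.config.correct_steps):
#           sample = scheduler.step_correct(model(sample, t), sample).prev_sample
#       sample = scheduler.step_pred(model(sample, t), t, sample).prev_sample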
| 23 | 0 |
import re


def split_input(str_):
    """Split on non-alphanumeric separators, then whitespace, into word lists."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_):
    """Join every word, capitalized, with no separator (PascalCase)."""
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text, upper, separator):
    """Join the words of ``text`` with ``separator`` in upper or lower case."""
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


# NOTE: the four wrapper names below were restored by the editor from each
# function's behaviour; only split_input/to_simple_case/to_complex_case are
# pinned down by internal call sites.
def to_pascal_case(text):
    return to_simple_case(text)


def to_camel_case(text):
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text, upper):
    return to_complex_case(text, upper, "_")


def to_kebab_case(text, upper):
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("doctest").testmod()
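# --- Editor's note: hypothetical usage, not part of the original file ---
if __name__ == "__main__":
    print(to_pascal_case("hello world program"))        # HelloWorldProgram
    print(to_camel_case("hello world program"))         # helloWorldProgram
    print(to_snake_case("hello world program", True))   # HELLO_WORLD_PROGRAM
    print(to_kebab_case("hello world program", False))  # hello-world-program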
| 36 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 0 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
# NOTE: the class name below was restored by the editor; the structure matches
# Transformer-XL's TF adaptive softmax layer.
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)

                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
| 331 |
import datasets
from .evaluate import evaluate
snake_case__ : int = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
snake_case__ : Union[str, Any] = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
snake_case__ : Any = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    # NOTE: class and method names restored by the editor to the standard
    # `datasets.Metric` interface (_info / _compute).
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {
'id': datasets.Value('string' ),
'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 23 | 0 |
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build, simulate and measure an n-qubit quantum Fourier transform circuit."""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
    )
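# Editor's note (assumption): applied to the all-zeros initial state, the QFT
# yields an equal superposition, so the 10000 shots should spread roughly
# uniformly over all 2**3 = 8 basis states (about 1250 counts each).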
| 326 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam-based dataset with flat string features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam-based dataset with nested sequence features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
# NOTE: class and method names restored by the editor from the assertions and
# decorators; they mirror `datasets`' own Beam builder tests.
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            # Editor's fix: the second assertion now checks the second shard
            # (the original checked shard 00000 twice).
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)
    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 23 | 0 |
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
_a = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_a = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
def __lowercase ( self : Any ):
        self.model_tester = DeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=37 )
def __lowercase ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def __lowercase ( self : Any ):
pass
def __lowercase ( self : List[str] ):
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def __lowercase ( self : Dict ):
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_UpperCAmelCase )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def __lowercase ( self : Union[str, Any] ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def __lowercase ( self : Optional[int] ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def __lowercase ( self : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any]=False ):
lowerCAmelCase = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __lowercase ( self : str ):
if not self.model_tester.is_training:
return
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_UpperCAmelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
lowerCAmelCase = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
lowerCAmelCase = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
lowerCAmelCase = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowercase ( self : Union[str, Any] ):
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowerCAmelCase = False
lowerCAmelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(_UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
lowerCAmelCase = model_class(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(_UpperCAmelCase )
model.train()
lowerCAmelCase = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
lowerCAmelCase = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowercase ( self : Union[str, Any] ):
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_UpperCAmelCase ),
*get_values(_UpperCAmelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'''Testing {model_class} with {problem_type['title']}''' ):
lowerCAmelCase = problem_type["""title"""]
lowerCAmelCase = problem_type["""num_labels"""]
lowerCAmelCase = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
lowerCAmelCase = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if problem_type["num_labels"] > 1:
lowerCAmelCase = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
lowerCAmelCase = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_UpperCAmelCase ) as warning_list:
lowerCAmelCase = model(**_UpperCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def __lowercase ( self : List[str] ):
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = DeiTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img ():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
    def default_image_processor( self : Union[str, Any] ):
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def __lowercase ( self : Any ):
lowerCAmelCase = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
_UpperCAmelCase )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=_UpperCAmelCase , return_tensors="""pt""" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(**_UpperCAmelCase )
# verify the logits
lowerCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
lowerCAmelCase = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __lowercase ( self : int ):
lowerCAmelCase = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=_UpperCAmelCase , return_tensors="""pt""" )
lowerCAmelCase = inputs.pixel_values.to(_UpperCAmelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowerCAmelCase = model(_UpperCAmelCase )
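        # the test passes if the fp16 forward above completes without raising; an
        # additional (illustrative, not original) check could assert that the
        # returned `last_hidden_state` has dtype `torch.float16`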
| 169 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , albert_config_file , pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict() , pytorch_dump_path)
if __name__ == "__main__":
snake_case__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
snake_case__ : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
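    # Example invocation (hypothetical paths and script name, shown for illustration only):
    #   python convert_albert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./albert_tf/model.ckpt-best \
    #       --albert_config_file ./albert_tf/albert_config.json \
    #       --pytorch_dump_path ./albert_pt/pytorch_model.bin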
| 23 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class lowerCAmelCase ( LayoutLMvaImageProcessor ):
"""simple docstring"""
def __init__( self , *_A , **_A ) -> None:
warnings.warn(
'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use LayoutLMv2ImageProcessor instead.' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 597 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class _a ( TestCasePlus ):
"""simple docstring"""
@slow
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
UpperCamelCase_ = BertTokenizer.from_pretrained('bert-base-uncased' )
UpperCamelCase_ = bertabert.config.encoder.vocab_size
UpperCamelCase_ = tokenizer.sep_token_id
UpperCamelCase_ = tokenizer.cls_token_id
UpperCamelCase_ = 128
UpperCamelCase_ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
UpperCamelCase_ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
UpperCamelCase_ = train_dataset.select(range(32 ) )
UpperCamelCase_ = val_dataset.select(range(16 ) )
UpperCamelCase_ = 4
        def _map_to_encoder_decoder_inputs(batch ):
# Tokenizer will automatically set [BOS] <text> [EOS]
UpperCamelCase_ = tokenizer(batch['article'] , padding='max_length' , truncation=_UpperCAmelCase , max_length=512 )
UpperCamelCase_ = tokenizer(batch['highlights'] , padding='max_length' , truncation=_UpperCAmelCase , max_length=128 )
UpperCamelCase_ = inputs.input_ids
UpperCamelCase_ = inputs.attention_mask
UpperCamelCase_ = outputs.input_ids
UpperCamelCase_ = outputs.input_ids.copy()
UpperCamelCase_ = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
UpperCamelCase_ = outputs.attention_mask
assert all(len(_UpperCAmelCase ) == 512 for x in inputs.input_ids )
assert all(len(_UpperCAmelCase ) == 128 for x in outputs.input_ids )
return batch
        def _compute_metrics(pred ):
UpperCamelCase_ = pred.label_ids
UpperCamelCase_ = pred.predictions
# all unnecessary tokens are removed
UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCamelCase_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_UpperCAmelCase ) )] ) / len(_UpperCAmelCase )
return {"accuracy": accuracy}
# map train dataset
UpperCamelCase_ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=['article', 'highlights'] , )
train_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
# same for validation dataset
UpperCamelCase_ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=['article', 'highlights'] , )
val_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = SeqaSeqTrainingArguments(
output_dir=_UpperCAmelCase , per_device_train_batch_size=_UpperCAmelCase , per_device_eval_batch_size=_UpperCAmelCase , predict_with_generate=_UpperCAmelCase , evaluation_strategy='steps' , do_train=_UpperCAmelCase , do_eval=_UpperCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
UpperCamelCase_ = SeqaSeqTrainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , tokenizer=_UpperCAmelCase , )
# start training
trainer.train()
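        # (illustrative continuation, not in the original test) since the training
        # arguments set `predict_with_generate=True`, `trainer.evaluate()` would run
        # generation over the eval set and report the `accuracy` computed above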
| 23 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class A_ ( ABC ):
@staticmethod
@abstractmethod
def _UpperCAmelCase ( __SCREAMING_SNAKE_CASE : List[Any] ):
raise NotImplementedError()
@abstractmethod
def _UpperCAmelCase ( self : Dict ):
raise NotImplementedError()
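

# Illustrative sketch (not part of the original file): a minimal concrete subclass of
# the abstract command base above. Upstream transformers names this pair of hooks
# `register_subcommand` and `run`; in this obfuscated source both appear as
# `_UpperCAmelCase`, and the second (instance-method) definition is the one that
# survives in the class namespace, so overriding it once makes the class instantiable.
class _ExampleCommand(A_):
    def _UpperCAmelCase(self : Dict):
        print('example command executed')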
| 197 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
snake_case__ : Dict = 1_6
snake_case__ : List[str] = 3_2
def get_dataloaders(accelerator , batch_size = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue' , 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='longest' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='pt' , )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size)
return train_dataloader, eval_dataloader
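

def _manual_accumulation_sketch(model , optimizer , train_dataloader , gradient_accumulation_steps):
    # Illustrative sketch only, not part of the original example: the classic manual
    # gradient-accumulation loop that `accelerator.accumulate` (used in
    # `training_function` below) automates. Scale the loss, let gradients accumulate,
    # and step the optimizer every `gradient_accumulation_steps` minibatches.
    for step, batch in enumerate(train_dataloader):
        loss = model(**batch).loss / gradient_accumulation_steps  # scale so the effective gradient is averaged
        loss.backward()  # gradients accumulate across backward calls
        if (step + 1) % gradient_accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()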
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders # noqa: F811
def training_function(config , args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS' , None) == "1":
        config['num_epochs'] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`')
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue' , 'mrpc')

    set_seed(seed)
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader) * num_epochs) , )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation.
            # We also currently do not support TPUs here, nor do we advise using `accumulate` on
            # them, as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions , references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(
                predictions=predictions , references=references , )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""" , eval_metric)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.' , )
    # New Code #
    parser.add_argument(
        '--gradient_accumulation_steps' , type=int , default=1 , help='The number of minibatches to be run before gradients are accumulated.' , )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config , args)
if __name__ == "__main__":
main()
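    # Example launch (illustrative; the script file name is an assumption):
    #   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2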
| 23 | 0 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
snake_case__ : int = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ (SequenceFeatureExtractor ):
'''simple docstring'''
_a = ["audio_values", "audio_mask"]
    def __init__( self , spectrogram_length=2_048 , num_channels=1 , patch_size=[16, 16] , feature_size=128 , sampling_rate=44_100 , hop_length_to_sampling_rate=86 , n_fft=2_048 , padding_value=0.0 , **kwargs , ) ->Optional[Any]:
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , ).T
    def _np_extract_fbank_features( self , waveform ) ->np.ndarray:
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
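        # after the -20 dB shift, the /40 scaling and the clip to [-2.0, 0.0],
        # adding 1.0 leaves the returned log-mel values in the range [-1.0, 1.0]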
return log_spec
    def __call__( self , raw_speech , return_tensors = None , return_attention_mask = True , sampling_rate = None , resample = False , mask_audio = False , **kwargs , ) ->BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    """This feature extractor is set to support sampling rate"""
                    F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    F''' with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )

        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , list ):
            audio_features = [np.asarray(feature , dtype=np.float32 ) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, 0, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
        else:
            data = {"""audio_values""": padded_audio_features}

        encoded_inputs = BatchFeature(data=data , tensor_type=return_tensors )
return encoded_inputs
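

# Minimal usage sketch (illustrative, not part of the original file): with the
# defaults above, one second of 44.1 kHz audio becomes padded log-mel patches plus
# a patch-level attention mask.
#   extractor = SCREAMING_SNAKE_CASE_()
#   audio = np.zeros(44_100 , dtype=np.float32)  # one second of silence
#   feats = extractor(audio , sampling_rate=44_100 , return_tensors='np')
#   feats['audio_values'].shape, feats['audio_mask'].shape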
| 278 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=2 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=10 , _UpperCAmelCase=3 , _UpperCAmelCase=32 * 8 , _UpperCAmelCase=32 * 8 , _UpperCAmelCase=4 , _UpperCAmelCase=64 , ) -> List[Any]:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = is_training
UpperCamelCase_ = use_auxiliary_loss
UpperCamelCase_ = num_queries
UpperCamelCase_ = num_channels
UpperCamelCase_ = min_size
UpperCamelCase_ = max_size
UpperCamelCase_ = num_labels
UpperCamelCase_ = hidden_dim
UpperCamelCase_ = hidden_dim
def _UpperCAmelCase ( self ) -> List[str]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()
        config = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
UpperCamelCase_ = self.num_queries
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = [1, 1, 1, 1]
UpperCamelCase_ = self.num_channels
UpperCamelCase_ = 64
UpperCamelCase_ = 128
UpperCamelCase_ = self.hidden_dim
UpperCamelCase_ = self.hidden_dim
UpperCamelCase_ = self.hidden_dim
return config
def _UpperCAmelCase ( self ) -> Dict:
        config , pixel_values , pixel_mask , mask_labels , class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
UpperCamelCase_ = output.encoder_hidden_states
UpperCamelCase_ = output.pixel_decoder_hidden_states
UpperCamelCase_ = output.transformer_decoder_hidden_states
        # assertEqual, not assertTrue: with two positional arguments assertTrue treats the second as a message
        self.parent.assertEqual(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(_UpperCAmelCase ) , config.decoder_layers )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Any:
with torch.no_grad():
UpperCamelCase_ = MaskaFormerModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase )
UpperCamelCase_ = model(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
UpperCamelCase_ = MaskaFormerForUniversalSegmentation(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
        def comm_check_on_output(result ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCamelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase )
UpperCamelCase_ = model(_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
UpperCamelCase_ = model(
pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _a ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
A_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
A_ = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
A_ = False
A_ = False
A_ = False
A_ = False
def _UpperCAmelCase ( self ) -> Optional[Any]:
        self.model_tester = MaskaFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskaFormerConfig , has_text_modality=False )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_UpperCAmelCase )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _UpperCAmelCase ( self ) -> int:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> str:
pass
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
UpperCamelCase_ = MaskaFormerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = (self.model_tester.min_size,) * 2
UpperCamelCase_ = {
'pixel_values': torch.randn((2, 3, *size) , device=_UpperCAmelCase ),
'mask_labels': torch.randn((2, 10, *size) , device=_UpperCAmelCase ),
'class_labels': torch.zeros(2 , 10 , device=_UpperCAmelCase ).long(),
}
UpperCamelCase_ = self.model_tester.get_config()
UpperCamelCase_ = MaskaFormerForUniversalSegmentation(_UpperCAmelCase ).to(_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase , output_attentions=_UpperCAmelCase )
self.assertTrue(outputs.attentions is not None )
def _UpperCAmelCase ( self ) -> List[Any]:
if not self.model_tester.is_training:
return
UpperCamelCase_ = self.all_model_classes[1]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = self.all_model_classes[1]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ = True
UpperCamelCase_ = True
UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase )
UpperCamelCase_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_UpperCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
snake_case__ : List[Any] = 1E-4
def prepare_img ():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_vision
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def model_checkpoints( self ) -> Optional[int]:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
    def default_image_processor( self ) -> List[str]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
UpperCamelCase_ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
UpperCamelCase_ = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
UpperCamelCase_ = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
UpperCamelCase_ = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval()
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
UpperCamelCase_ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
# masks_queries_logits
UpperCamelCase_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCamelCase_ = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
UpperCamelCase_ = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
# class_queries_logits
UpperCamelCase_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
UpperCamelCase_ = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval()
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
UpperCamelCase_ = inputs['pixel_values'].to(_UpperCAmelCase )
UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['mask_labels']]
UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['class_labels']]
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
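        # (illustrative follow-up, not in the original test) the raw outputs above
        # would normally be converted to per-instance masks with the image
        # processor's `post_process_instance_segmentation(outputs)` helper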
| 23 | 0 |
'''simple docstring'''
def solution ( max_base = 1_0 , max_power = 2_2 ) -> int:
    '''simple docstring'''
    bases = range(1 , max_base )
    powers = range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
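

# Worked check (illustrative): 9**21 has exactly 21 digits, so it is counted, while
# 10**n always has n + 1 digits, which is why `bases` stops below `max_base`. With
# the defaults the sum evaluates to 49 (Project Euler 63).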
if __name__ == "__main__":
print(f"""{solution(10, 22) = }""")
| 435 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
snake_case__ : List[str] = logging.get_logger(__name__)
class _a ( PretrainedConfig ):
"""simple docstring"""
A_ = """vision-encoder-decoder"""
A_ = True
def __init__( self , **_UpperCAmelCase ) -> Dict:
super().__init__(**_UpperCAmelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f"""A configuraton of type {self.model_type} cannot be instantiated because """
f"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
UpperCamelCase_ = kwargs.pop('encoder' )
UpperCamelCase_ = encoder_config.pop('model_type' )
UpperCamelCase_ = kwargs.pop('decoder' )
UpperCamelCase_ = decoder_config.pop('model_type' )
UpperCamelCase_ = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
UpperCamelCase_ = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
UpperCamelCase_ = True
@classmethod
def _UpperCAmelCase ( cls , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> PretrainedConfig:
logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
UpperCamelCase_ = True
UpperCamelCase_ = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = copy.deepcopy(self.__dict__ )
UpperCamelCase_ = self.encoder.to_dict()
UpperCamelCase_ = self.decoder.to_dict()
UpperCamelCase_ = self.__class__.model_type
return output
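

# Minimal usage sketch (illustrative, not part of the original file):
#   enc = AutoConfig.for_model('vit').to_dict()
#   dec = AutoConfig.for_model('gpt2').to_dict()
#   composite = _a(encoder=enc , decoder=dec)  # the composite config class above
#   composite.to_dict()['model_type']          # 'vision-encoder-decoder'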
class _a ( OnnxConfig ):
"""simple docstring"""
A_ = version.parse("""1.11""" )
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _UpperCAmelCase ( self ) -> float:
return 1e-4
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} )
class _a ( OnnxConfig ):
"""simple docstring"""
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
UpperCamelCase_ = OrderedDict()
UpperCamelCase_ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
UpperCamelCase_ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
UpperCamelCase_ = {0: 'batch', 1: 'encoder_sequence'}
return common_inputs
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ) -> Mapping[str, Any]:
import torch
UpperCamelCase_ = OrderedDict()
UpperCamelCase_ = super().generate_dummy_inputs(
_UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase )
UpperCamelCase_ , UpperCamelCase_ = dummy_input['input_ids'].shape
UpperCamelCase_ = (batch, encoder_sequence, self._config.encoder_hidden_size)
UpperCamelCase_ = dummy_input.pop('input_ids' )
UpperCamelCase_ = dummy_input.pop('attention_mask' )
UpperCamelCase_ = torch.zeros(_UpperCAmelCase )
return common_inputs
class _a ( OnnxConfig ):
"""simple docstring"""
@property
def _UpperCAmelCase ( self ) -> None:
pass
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> OnnxConfig:
return VisionEncoderDecoderEncoderOnnxConfig(_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = "default" ) -> OnnxConfig:
UpperCamelCase_ = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_UpperCAmelCase , _UpperCAmelCase )
| 23 | 0 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow :
    def __init__( self , device = "cpu" , clip_model = "openai/clip-vit-large-patch14" ) -> None:
        '''simple docstring'''
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model )
        self.image_mean = [0.48_145_466, 0.4_578_275, 0.40_821_073]
        self.image_std = [0.26_862_954, 0.26_130_258, 0.27_577_711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean , self.image_std )
        self.resize = torchvision.transforms.Resize(224 )
        self.center_crop = torchvision.transforms.CenterCrop(224 )

    def preprocess_img( self , images ) -> int:
        '''simple docstring'''
        images = self.resize(images )
        images = self.center_crop(images )
        images = self.normalize(images )
        return images

    def __call__( self , text=None , images=None , **kwargs ) -> int:
        '''simple docstring'''
        encoding = self.tokenizer(text=text , **kwargs )
        encoding['''pixel_values'''] = self.preprocess_img(images )
        encoding = {key: value.to(self.device ) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP( nn.Module):
    def __init__( self , iterations=10 , lr=0.01 , vqgan=None , vqgan_config=None , vqgan_checkpoint=None , clip=None , clip_preprocessor=None , device=None , log=False , save_vector=True , return_val="image" , quantize=True , save_intermediate=False , show_intermediate=False , make_grid=False , ) -> None:
        '''simple docstring'''
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device , conf_path=vqgan_config , ckpt_path=vqgan_checkpoint )
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.clip.to(self.device )
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device )

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation( self , input_path=None , output_path=None , total_duration=5 , extend_frames=True ) -> Dict:
        '''simple docstring'''
        images = []
        if output_path is None:
            output_path = '''./animation.gif'''
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + '''/*''' ) )
        if not len(paths ):
            raise ValueError(
                '''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
                ''' function?)''' )
        if len(paths ) == 1:
            print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
        frame_duration = total_duration / len(paths )
        durations = [frame_duration] * len(paths )
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith('''.png''' ):
                images.append(imageio.imread(file_name ) )
        imageio.mimsave(output_path , images , duration=durations )
        print(F'''gif saved to {output_path}''' )
    def _get_latent( self , path=None , img=None ) -> List[Any]:
        '''simple docstring'''
        if not (path or img):
            raise ValueError('''Input either path or tensor''' )
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path ) , target_image_size=256 ).to(self.device )
        img = preprocess_vqgan(img )
        z , *_ = self.vqgan.encode(img )
        return z

    def _add_vector( self , transform_vector ) -> List[Any]:
        '''simple docstring'''
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q , *_ = self.vqgan.quantize(trans_latent )
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q )
    def _get_clip_similarity( self , prompts , image , weights=None ) -> Optional[Any]:
        '''simple docstring'''
        clip_inputs = self.clip_preprocessor(text=prompts , images=image , return_tensors='''pt''' , padding=True )
        clip_outputs = self.clip(**clip_inputs )
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss( self , pos_prompts , neg_prompts , image ) -> int:
        '''simple docstring'''
        pos_logits = self._get_clip_similarity(pos_prompts['''prompts'''] , image , weights=(1 / pos_prompts['''weights''']) )
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts['''prompts'''] , image , weights=neg_prompts['''weights'''] )
        else:
            neg_logits = torch.tensor([1] , device=self.device )
        loss = -torch.log(pos_logits ) + torch.log(neg_logits )
        return loss
    def _optimize_CLIP( self , original_img , pos_prompts , neg_prompts ) -> Dict:
        '''simple docstring'''
        vector = torch.randn_like(self.latent , requires_grad=True , device=self.device )
        optim = torch.optim.Adam([vector] , lr=self.lr )

        for i in range(self.iterations ):
            optim.zero_grad()
            transformed_img = self._add_vector(vector )
            processed_img = loop_post_process(transformed_img )
            clip_loss = self._get_CLIP_loss(pos_prompts , neg_prompts , processed_img )
            print('''CLIP loss''' , clip_loss )
            if self.log:
                wandb.log({'''CLIP Loss''': clip_loss} )
            clip_loss.backward(retain_graph=True )
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0] )
            else:
                yield vector
    def _init_logging( self , positive_prompts , negative_prompts , image_path ) -> Optional[Any]:
        '''simple docstring'''
        wandb.init(reinit=True , project='''face-editor''' )
        wandb.config.update({'''Positive Prompts''': positive_prompts} )
        wandb.config.update({'''Negative Prompts''': negative_prompts} )
        wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
        if image_path:
            image = Image.open(image_path )
            image = image.resize((256, 256) )
            wandb.log({'''Original Image''': wandb.Image(image )} )
    def process_prompts( self , prompts ) -> List[str]:
        '''simple docstring'''
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts , str ):
            prompts = [prompt.strip() for prompt in prompts.split('''|''' )]
        for prompt in prompts:
            if isinstance(prompt , (tuple, list) ):
                processed_prompt = prompt[0]
                weight = float(prompt[1] )
            elif ":" in prompt:
                processed_prompt , weight = prompt.split(''':''' )
                weight = float(weight )
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt )
            weights.append(weight )
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights , device=self.device ),
        }
    def generate( self , pos_prompts , neg_prompts=None , image_path=None , show_intermediate=True , save_intermediate=False , show_final=True , save_final=True , save_path=None , ) -> int:
        '''simple docstring'''
        if image_path:
            self.latent = self._get_latent(image_path )
        else:
            self.latent = torch.randn(self.latent_dim , device=self.device )
        if self.log:
            self._init_logging(pos_prompts , neg_prompts , image_path )

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts )
        neg_prompts = self.process_prompts(neg_prompts )
        if save_final and save_path is None:
            save_path = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
            if not os.path.exists(save_path ):
                os.makedirs(save_path )
            else:
                save_path = save_path + '''_''' + get_timestamp()
                os.makedirs(save_path )
        self.save_path = save_path

        original_img = self.vqgan.decode(self.latent )[0]
        if show_intermediate:
            print('''Original Image''' )
            show_pil(custom_to_pil(original_img ) )

        original_img = loop_post_process(original_img )
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img , pos_prompts , neg_prompts ) ):
            if show_intermediate:
                show_pil(transformed_img )
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}.png''' ) )
            if self.log:
                wandb.log({'''Image''': wandb.Image(transformed_img )} )
        if show_final:
            show_pil(transformed_img )
        if save_final:
            transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}_final.png''' ) )
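

# Note on the prompt-string format accepted by `process_prompts` above (grounded in
# its `split('|')` and `':'` parsing): a string such as
#   'a smiling face:1.0|bright eyes:0.5'
# yields two prompts with weights 1.0 and 0.5; a bare prompt defaults to weight 1.0.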
| 24 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = '''src/transformers/models/auto'''

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(R'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')

# re pattern that matches identifiers in mappings
_re_identifier = re.compile(R'''\s*\(\s*"(\S[^"]+)"''')
def sort_auto_mapping (fname : str , overwrite : bool = False )-> bool:
    '''simple docstring'''

    with open(fname , '''r''' , encoding='''utf-8''' ) as f:
        content = f.read()

    lines = content.split('''\n''' )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(R'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
                new_lines.append(lines[line_idx] )
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
                        line_idx += 1
                    blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks , key=lambda x : _re_identifier.search(x ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1

    if overwrite:
        with open(fname , '''w''' , encoding='''utf-8''' ) as f:
            f.write('''\n'''.join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True
def _UpperCamelCase (_lowerCamelCase : bool = False )-> Tuple:
'''simple docstring'''
__snake_case = [os.path.join(_lowerCamelCase , _lowerCamelCase ) for f in os.listdir(_lowerCamelCase ) if f.endswith('''.py''' )]
__snake_case = [sort_auto_mapping(_lowerCamelCase , overwrite=_lowerCamelCase ) for fname in fnames]
if not overwrite and any(_lowerCamelCase ):
__snake_case = [f for f, d in zip(_lowerCamelCase , _lowerCamelCase ) if d]
raise ValueError(
f'''The following files have auto mappings that need sorting: {", ".join(_lowerCamelCase )}. Run `make style` to fix'''
''' this.''' )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
UpperCAmelCase_ : List[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
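
# Example of the reordering this enforces (hypothetical mapping entries): an
# out-of-order block such as
#     ("bert", "BertModel"),
#     ("albert", "AlbertModel"),
# is rewritten so entries are sorted by identifier ("albert" before "bert").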
| 24 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 24 |
"""Deprecation helper: warn about (and pop) deprecated arguments or attributes."""
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
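
# Usage sketch (the caller names below are illustrative, not from a real pipeline):
#
#     def __call__(self, *, scale=None, **kwargs):
#         gain = deprecate("gain", "999.0.0", "Use `scale` instead.", take_from=kwargs)
#
# `deprecate` pops the deprecated kwarg, emits a FutureWarning, and returns the
# popped value so the caller can still honour it.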
| 24 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
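
# Net effect: `from transformers.models.gpt_bigcode import GPTBigCodeConfig` stays cheap,
# while the torch-backed classes above are only imported on first attribute access.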
| 24 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    """Map an original EfficientFormer state-dict key to its transformers name."""
    new_name = old_name

    if "patch_embed" in old_name:
        hidden_layers, layer, sequence = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
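
# Illustrative renames (hypothetical key names, not read from a real checkpoint):
#     rename_key("patch_embed.0.weight", 5) -> "efficientformer.patch_embed.convolution1.weight"
#     rename_key("head.weight", 5)          -> "classifier.weight"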
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size}, crop_size={"height": crop_size, "width": crop_size}, resample=pillow_resamplings["bicubic"], )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ] )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7" )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add model", use_temp_dir=True, )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add image processor", use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 24 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
        eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates, )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
@slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"
    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
[
[1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
[-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
[0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
[6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
] )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
'''Hello, my dog is a little''',
'''Today, I''',
]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 24 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 24 | 1 |
"""Newton-Raphson root finding with support for repeated (multiple) roots."""
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find the root of `function` near `starting_point`; `multiplicity` speeds up repeated roots."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function (gives the value of pi)
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")

    # Find root of polynomial (fourth root of 5)
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 - 5', 0.4 + 5j)}")

    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )

    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )

    # Find root of cos(x); start away from 0, where the derivative vanishes
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 2)}")
| 24 |
"""Dataset reader that materializes a Spark DataFrame as a `datasets` Dataset."""
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame; `streaming` is handled separately below."""

    def __init__(self, df: pyspark.sql.DataFrame, split: Optional[NamedSplit] = None, features: Optional[Features] = None, streaming: bool = True, cache_dir: str = None, keep_in_memory: bool = False, working_dir: str = None, load_from_cache_file: bool = True, file_format: str = "arrow", **kwargs):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
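
# Usage sketch (assumes an active SparkSession named `spark`; in practice this
# class is imported from the `datasets` package rather than run standalone):
#
#     df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#     ds = SparkDatasetReader(df, streaming=False, cache_dir="/tmp/hf_cache").read()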
| 24 | 1 |
"""Compress a file with a simple bit-level Lempel-Ziv scheme."""
import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file and return its contents as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Extend the lexicon with the two continuations of the phrase just emitted."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        # The code width grew by one bit; left-pad every existing code.
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress the given bit string using the Lempel-Ziv algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prefix the compressed bits with a header that encodes the source file size."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string to the file, padded out to whole bytes."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (byte_length - len(result_byte_array[-1]) - 1)

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """Read `source_path`, compress the data and write the result to `destination_path`."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
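
# Example session (hypothetical file names):
#
#     $ python lempel_ziv_compress.py notes.txt notes.lz
#
# The source file is read as a bit string, encoded one lexicon index per matched
# phrase, and prefixed with an Elias-gamma-style length header so the original
# byte count can be recovered when decompressing.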
| 24 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
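
    # Padding sketch: with right padding and two positions to fill,
    #     global_attention_mask [1, 0, 0] -> [1, 0, 0, -1, -1]
    # so downstream code can tell padding (-1) apart from local attention (0).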
| 24 | 1 |
def sum_of_digits(n: int) -> int:
    """Find the sum of digits of a number, iteratively."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Find the sum of digits of a number using recursion."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Find the sum of digits of a number via its string form."""
    return sum(int(c) for c in str(abs(n)))
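
# Quick sanity check: all three implementations agree, including on negatives.
assert sum_of_digits(-123) == sum_of_digits_recursion(-123) == sum_of_digits_compact(-123) == 6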
def benchmark() -> None:
    """Benchmark the three implementations on ints of increasing length."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 126765060022822940149670320537600):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 24 |
'''simple docstring'''
from collections import deque


def tarjan(g):
    '''simple docstring'''
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components


def create_graph(n, edges):
    '''simple docstring'''
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
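    # A second usage sketch: a single directed 3-cycle collapses into one
    # strongly connected component (popped off the stack in reverse order).
    g2 = create_graph(3, [(0, 1), (1, 2), (2, 0)])
    assert tarjan(g2) == [[2, 1, 0]]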
| 24 | 1 |
'''simple docstring'''
from __future__ import annotations


def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    '''simple docstring'''
    if partitions <= 0:
        raise ValueError('''partitions must be a positive number!''')
    if partitions > number_of_bytes:
        raise ValueError('''partitions can not > number_of_bytes!''')
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'''{start_bytes}-{end_bytes}''')
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
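    # Usage sketch for the helper above (`allocation_num` is a restored name):
    # 100 bytes split across 4 partitions, the last one absorbing any remainder.
    assert allocation_num(100, 4) == ['1-25', '26-50', '51-75', '76-100']
    assert allocation_num(10, 3) == ['1-3', '4-6', '7-10']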
| 24 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : int = BarthezTokenizer
__lowercase : Any = BarthezTokenizerFast
__lowercase : Dict = True
__lowercase : Optional[int] = True
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
super().setUp()
__snake_case = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__SCREAMING_SNAKE_CASE )
__snake_case = tokenizer
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = '''<pad>'''
__snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 10_1122 )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__snake_case = [0, 57, 3018, 7_0307, 91, 2]
__snake_case = self.tokenizer(
__SCREAMING_SNAKE_CASE , max_length=len(__SCREAMING_SNAKE_CASE ) , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer()
__snake_case = '''I was born in 92000, and this is falsé.'''
__snake_case = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = self.get_rust_tokenizer()
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__snake_case = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=__SCREAMING_SNAKE_CASE , )
| 24 | 1 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
UpperCAmelCase_ : Tuple = '''<<<<<<< This should probably be modified because it mentions: '''
UpperCAmelCase_ : Optional[int] = '''=======
>>>>>>>
'''
UpperCAmelCase_ : Tuple = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
UpperCAmelCase_ : Optional[int] = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def _UpperCamelCase (_lowerCamelCase : Namespace )-> int:
'''simple docstring'''
return ConvertCommand(args.tfds_path , args.datasets_directory )
class lowerCAmelCase ( __lowerCAmelCase):
@staticmethod
def lowerCAmelCase ( __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
__snake_case = parser.add_parser(
'''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
train_parser.add_argument(
'''--tfds_path''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
train_parser.add_argument(
'''--datasets_directory''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''Path to the HuggingFace Datasets folder.''' )
train_parser.set_defaults(func=__SCREAMING_SNAKE_CASE )
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = get_logger('''datasets-cli/converting''' )
__snake_case = tfds_path
__snake_case = datasets_directory
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
__snake_case = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__snake_case = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
__snake_case = os.path.abspath(self._datasets_directory )
self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
__snake_case = []
__snake_case = []
__snake_case = {}
if os.path.isdir(self._tfds_path ):
__snake_case = os.listdir(__SCREAMING_SNAKE_CASE )
else:
__snake_case = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F'''Looking at file {f_name}''' )
__snake_case = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if not os.path.isfile(__SCREAMING_SNAKE_CASE ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as f:
__snake_case = f.readlines()
__snake_case = []
__snake_case = False
__snake_case = False
__snake_case = []
for line in lines:
__snake_case = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__snake_case = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
__snake_case = ''''''
continue
elif "from absl import logging" in out_line:
__snake_case = '''from datasets import logging\n'''
elif "getLogger" in out_line:
__snake_case = out_line.replace('''getLogger''' , '''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__snake_case = True
__snake_case = list(filter(lambda __SCREAMING_SNAKE_CASE : e in out_line , __SCREAMING_SNAKE_CASE ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__SCREAMING_SNAKE_CASE ) + '''\n''' )
out_lines.append(__SCREAMING_SNAKE_CASE )
out_lines.append(__SCREAMING_SNAKE_CASE )
continue
else:
for pattern, replacement in TO_CONVERT:
__snake_case = re.sub(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__snake_case = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , __SCREAMING_SNAKE_CASE )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
__snake_case = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__snake_case = True
out_lines.append(__SCREAMING_SNAKE_CASE )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__snake_case = f_name.replace('''.py''' , '''''' )
__snake_case = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
self._logger.info(F'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(__SCREAMING_SNAKE_CASE )
if needs_manual_update:
with_manual_update.append(__SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
f.writelines(__SCREAMING_SNAKE_CASE )
self._logger.info(F'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
__snake_case = os.path.basename(__SCREAMING_SNAKE_CASE )
__snake_case = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
                self._logger.info(F'''Moving {utils_file} to {dest_folder}''' )
shutil.copy(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
except KeyError:
self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
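
# A minimal, self-contained sketch of how the (pattern, replacement) table is
# applied above: each regex in TO_CONVERT is tried in order via re.sub.
# The two patterns below are copied from the table; the input line is invented.
_demo_patterns = [
    (r'''tfds\.features\.Text\(\)''', '''datasets.Value('string')'''),
    (r'''tfds\.''', '''datasets.'''),
]
_line = '''text = tfds.features.Text()'''
for _pat, _repl in _demo_patterns:
    _line = re.sub(_pat, _repl, _line)
assert _line == '''text = datasets.Value('string')'''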
| 24 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCAmelCase ( unittest.TestCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=4 , ) -> Any:
'''simple docstring'''
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_attention_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_choices
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_attention_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Tuple = True
__lowercase : Optional[int] = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = FlaxRoFormerModelTester(self )
@slow
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__snake_case = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=__SCREAMING_SNAKE_CASE )
__snake_case = model(np.ones((1, 1) ) )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_flax
class lowerCAmelCase ( unittest.TestCase):
@slow
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
__snake_case = jnp.array([[0, 1, 2, 3, 4, 5]] )
__snake_case = model(__SCREAMING_SNAKE_CASE )[0]
__snake_case = 5_0000
__snake_case = (1, 6, vocab_size)
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__snake_case = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 24 | 1 |
'''simple docstring'''
UpperCAmelCase_ : Any = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
UpperCAmelCase_ : str = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
UpperCAmelCase_ : Optional[Any] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 24 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    '''simple docstring'''
    url = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
    image = Image.open(requests.get(url, stream=True).raw).convert('''RGB''')
    return image
def create_rename_keys(config):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    '''simple docstring'''
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''')
        v_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''')

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
        state_dict[f'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
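
# Shape sketch for the bias assembled by read_in_q_v_bias above (hypothetical
# sizes): the key bias is fixed at zero, so only q and v come from the checkpoint.
_q_demo = torch.ones(4)
_v_demo = torch.ones(4)
assert torch.cat((_q_demo, torch.zeros_like(_q_demo), _v_demo)).shape == (12,)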
def get_blipa_config(model_name, eos_token_id):
    '''simple docstring'''
    image_size = 364 if '''coco''' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-2.7b''', eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-6.7b''', eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xl''', dense_act_fn='''gelu''', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xxl''', dense_act_fn='''gelu''', bos_token_id=1).to_dict()

    config = BlipaConfig(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Any=False )-> Dict:
'''simple docstring'''
__snake_case = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
__snake_case = tokenizer('''\n''' , add_special_tokens=_lowerCamelCase ).input_ids[0]
__snake_case , __snake_case = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase )
__snake_case = BlipaForConditionalGeneration(_lowerCamelCase ).eval()
__snake_case = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
__snake_case , __snake_case = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
__snake_case = '''cuda''' if torch.cuda.is_available() else '''cpu'''
__snake_case , __snake_case , __snake_case = load_model_and_preprocess(
name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase )
original_model.eval()
print('''Done!''' )
# update state dict keys
__snake_case = original_model.state_dict()
__snake_case = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__snake_case = state_dict.pop(_lowerCamelCase )
if key.startswith('''Qformer.bert''' ):
__snake_case = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
__snake_case = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
__snake_case = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
__snake_case = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
__snake_case = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
__snake_case = key.replace('''t5''' , '''language''' )
__snake_case = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case = hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert len(_lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__snake_case = load_demo_image()
__snake_case = vis_processors['''eval'''](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
__snake_case = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
# create processor
__snake_case = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
__snake_case = BlipaProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values.to(_lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
__snake_case = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase ).logits
else:
__snake_case = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
__snake_case = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__snake_case = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__snake_case = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase )
else:
# cast to same type
__snake_case = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1E-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
__snake_case = ''''''
__snake_case = tokenizer(_lowerCamelCase , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
__snake_case = original_model.generate({'''image''': original_pixel_values} )
__snake_case = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , _lowerCamelCase )
__snake_case = input_ids.shape[1]
__snake_case = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
__snake_case = [text.strip() for text in output_text]
print('''HF generation:''' , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
UpperCAmelCase_ : Tuple = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
UpperCAmelCase_ : List[str] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
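    # Example invocation (hypothetical script path; flags as defined above):
    #   python convert_blip2_original_to_pytorch.py \
    #       --model_name blip2-opt-2.7b \
    #       --pytorch_dump_folder_path ./blip2-opt-2.7b \
    #       --push_to_hub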
| 24 | 1 |
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def _UpperCamelCase (_lowerCamelCase : List[str] , _lowerCamelCase : Any , _lowerCamelCase : Dict )-> Optional[int]:
'''simple docstring'''
__snake_case = AlbertConfig.from_json_file(_lowerCamelCase )
print(f'''Building PyTorch model from configuration: {config}''' )
__snake_case = AlbertForPreTraining(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_albert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , _lowerCamelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase_ : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
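    # Example invocation (hypothetical paths; flags as defined above):
    #   python convert_albert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./albert_base/model.ckpt-best \
    #       --albert_config_file ./albert_base/albert_config.json \
    #       --pytorch_dump_path ./albert_base/pytorch_model.bin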
| 24 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''' , _lowerCamelCase , )
if isinstance(_lowerCamelCase , torch.Tensor ):
return image
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [image]
if isinstance(image[0] , PIL.Image.Image ):
__snake_case , __snake_case = image[0].size
__snake_case , __snake_case = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__snake_case = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
__snake_case = np.array(_lowerCamelCase ).astype(np.floataa ) / 255.0
__snake_case = image.transpose(0 , 3 , 1 , 2 )
__snake_case = 2.0 * image - 1.0
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(image[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return image
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
if isinstance(_lowerCamelCase , torch.Tensor ):
return mask
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
__snake_case , __snake_case = mask[0].size
__snake_case , __snake_case = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__snake_case = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
__snake_case = mask.astype(np.floataa ) / 255.0
__snake_case = 0
__snake_case = 1
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(mask[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return mask
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : UNetaDModel
__lowercase : RePaintScheduler
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 250 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
__snake_case = image
__snake_case = _preprocess_image(__SCREAMING_SNAKE_CASE )
__snake_case = original_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = _preprocess_mask(__SCREAMING_SNAKE_CASE )
__snake_case = mask_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(__SCREAMING_SNAKE_CASE )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
__snake_case = original_image.shape
__snake_case = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.device )
__snake_case = eta
__snake_case = self.scheduler.timesteps[0] + 1
__snake_case = generator[0] if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
__snake_case = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).sample
# compute previous image: x_t -> x_t-1
__snake_case = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__snake_case = self.scheduler.undo_step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = t
__snake_case = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
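
# A small numeric sketch of the normalisation used in _preprocess_image above:
# pixel values in [0, 1] are mapped to [-1, 1] via 2 * x - 1 before the UNet.
_px = np.array([0.0, 0.5, 1.0], dtype=np.float32)
assert np.allclose(2.0 * _px - 1.0, [-1.0, 0.0, 1.0])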
| 24 | 1 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = TypeVar('''DatasetType''', Dataset, IterableDataset)
def _UpperCamelCase (_lowerCamelCase : List[DatasetType] , _lowerCamelCase : Optional[List[float]] = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Optional[DatasetInfo] = None , _lowerCamelCase : Optional[NamedSplit] = None , _lowerCamelCase : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , )-> DatasetType:
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(_lowerCamelCase ):
if not isinstance(_lowerCamelCase , (Dataset, IterableDataset) ):
if isinstance(_lowerCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
'''is an empty dataset dictionary.''' )
raise ValueError(
f'''Dataset at position {i} has at least one split: {list(_lowerCamelCase )}\n'''
f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_lowerCamelCase ) )}\']''' )
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_lowerCamelCase ).__name__}.''' )
if i == 0:
__snake_case , __snake_case = (
(Dataset, IterableDataset) if isinstance(_lowerCamelCase , _lowerCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(
f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f'''{stopping_strategy} is not supported. Please enter a valid stopping_strategy.''' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , info=_lowerCamelCase , split=_lowerCamelCase , stopping_strategy=_lowerCamelCase )
else:
return _interleave_iterable_datasets(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , info=_lowerCamelCase , split=_lowerCamelCase , stopping_strategy=_lowerCamelCase )
def _UpperCamelCase (_lowerCamelCase : List[DatasetType] , _lowerCamelCase : Optional[DatasetInfo] = None , _lowerCamelCase : Optional[NamedSplit] = None , _lowerCamelCase : int = 0 , )-> DatasetType:
'''simple docstring'''
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(_lowerCamelCase ):
if not isinstance(_lowerCamelCase , (Dataset, IterableDataset) ):
if isinstance(_lowerCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
'''is an empty dataset dictionary.''' )
raise ValueError(
f'''Dataset at position {i} has at least one split: {list(_lowerCamelCase )}\n'''
f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_lowerCamelCase ) )}\']''' )
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_lowerCamelCase ).__name__}.''' )
if i == 0:
__snake_case , __snake_case = (
(Dataset, IterableDataset) if isinstance(_lowerCamelCase , _lowerCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(
                f'''Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_lowerCamelCase , info=_lowerCamelCase , split=_lowerCamelCase , axis=_lowerCamelCase )
else:
return _concatenate_iterable_datasets(_lowerCamelCase , info=_lowerCamelCase , split=_lowerCamelCase , axis=_lowerCamelCase )
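
# Usage sketch for the public entry points these helpers back (assumes the
# `datasets` library is installed; values are arbitrary toy data):
#
#     from datasets import Dataset, interleave_datasets
#
#     d1 = Dataset.from_dict({'x': [0, 1, 2]})
#     d2 = Dataset.from_dict({'x': [10, 11, 12]})
#     mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42,
#                                 stopping_strategy='all_exhausted')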
| 24 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
UpperCAmelCase_ : str = logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase)
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
__snake_case = {}
if "candidate_labels" in kwargs:
__snake_case = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__snake_case = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="This is a photo of {}." ) -> Optional[Any]:
'''simple docstring'''
__snake_case = load_image(__SCREAMING_SNAKE_CASE )
__snake_case = self.image_processor(images=[image] , return_tensors=self.framework )
__snake_case = candidate_labels
__snake_case = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels]
__snake_case = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE )
__snake_case = [text_inputs]
return inputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = model_inputs.pop('''candidate_labels''' )
__snake_case = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ):
__snake_case = text_inputs[0]
else:
# Batching case.
__snake_case = text_inputs[0][0]
__snake_case = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__snake_case = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__snake_case = model_outputs.pop('''candidate_labels''' )
__snake_case = model_outputs['''logits'''][0]
if self.framework == "pt":
__snake_case = logits.softmax(dim=-1 ).squeeze(-1 )
__snake_case = probs.tolist()
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = [scores]
elif self.framework == "tf":
__snake_case = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
__snake_case = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
__snake_case = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda __SCREAMING_SNAKE_CASE : -x[0] )
]
return result
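
# Usage sketch via the public factory (assumes `transformers` with a vision
# backend is installed; the checkpoint name is only an example):
#
#     from transformers import pipeline
#
#     classifier = pipeline('zero-shot-image-classification',
#                           model='openai/clip-vit-base-patch32')
#     preds = classifier('photo.jpg', candidate_labels=['cat', 'dog'])
#     # -> [{'score': ..., 'label': ...}, ...] sorted by descending score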
| 24 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''' , _lowerCamelCase , )
if isinstance(_lowerCamelCase , torch.Tensor ):
return image
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [image]
if isinstance(image[0] , PIL.Image.Image ):
__snake_case , __snake_case = image[0].size
__snake_case , __snake_case = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__snake_case = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
__snake_case = np.array(_lowerCamelCase ).astype(np.floataa ) / 255.0
__snake_case = image.transpose(0 , 3 , 1 , 2 )
__snake_case = 2.0 * image - 1.0
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(image[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return image
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
if isinstance(_lowerCamelCase , torch.Tensor ):
return mask
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
__snake_case , __snake_case = mask[0].size
__snake_case , __snake_case = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__snake_case = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
__snake_case = mask.astype(np.floataa ) / 255.0
__snake_case = 0
__snake_case = 1
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(mask[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return mask
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : UNetaDModel
__lowercase : RePaintScheduler
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 250 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
__snake_case = image
__snake_case = _preprocess_image(__SCREAMING_SNAKE_CASE )
__snake_case = original_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = _preprocess_mask(__SCREAMING_SNAKE_CASE )
__snake_case = mask_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(__SCREAMING_SNAKE_CASE )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
__snake_case = original_image.shape
__snake_case = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.device )
__snake_case = eta
__snake_case = self.scheduler.timesteps[0] + 1
__snake_case = generator[0] if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
__snake_case = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).sample
# compute previous image: x_t -> x_t-1
__snake_case = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__snake_case = self.scheduler.undo_step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = t
__snake_case = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
| 24 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase_ : List[str] = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
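
# A minimal sketch of the lazy-import pattern behind _LazyModule, via PEP 562
# module-level __getattr__ (illustrative only, not the real implementation):
#
#     import importlib
#
#     def __getattr__(name):
#         for submodule, names in _import_structure.items():
#             if name in names:
#                 module = importlib.import_module('.' + submodule, __name__)
#                 return getattr(module, name)
#         raise AttributeError(f'module {__name__!r} has no attribute {name!r}')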
| 24 | 1 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCAmelCase_ : Any = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
UpperCAmelCase_ : str = {
'''facebook/bart-base''': 1_0_2_4,
'''facebook/bart-large''': 1_0_2_4,
'''facebook/bart-large-mnli''': 1_0_2_4,
'''facebook/bart-large-cnn''': 1_0_2_4,
'''facebook/bart-large-xsum''': 1_0_2_4,
'''yjernite/bart_eli5''': 1_0_2_4,
}
@lru_cache()
def bytes_to_unicode():
    '''simple docstring'''
    bs = (
        list(range(ord('''!'''), ord('''~''') + 1)) + list(range(ord('''¡'''), ord('''¬''') + 1)) + list(range(ord('''®'''), ord('''ÿ''') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
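
# Usage sketch for get_pairs above: the set of adjacent symbol pairs in a word.
assert get_pairs(('h', 'e', 'l', 'l', 'o')) == {
    ('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')
}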
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Optional[int] = VOCAB_FILES_NAMES
__lowercase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : List[str] = ['''input_ids''', '''attention_mask''']
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
        with open(vocab_file, encoding='''utf-8''') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='''utf-8''') as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
    @property
    def vocab_size(self):
        '''simple docstring'''
        return len(self.encoder)

    def get_vocab(self):
        '''simple docstring'''
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('''inf''')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ''' '''.join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''') )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(''' '''))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        return self.decoder.get(index)
    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        text = ''''''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('''utf-8''', errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])
        with open(vocab_file, '''w''', encoding='''utf-8''') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '''\n''')
        index = 0
        with open(merge_file, '''w''', encoding='''utf-8''') as writer:
            writer.write('''#version: 0.2\n''')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''')
                    index = token_index
                writer.write(''' '''.join(bpe_tokens) + '''\n''')
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        '''simple docstring'''
        add_prefix_space = kwargs.pop('''add_prefix_space''', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
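
# --- Editor's illustrative sketch (not part of the original file). A minimal
# round trip with the byte-level BPE tokenizer above. The vocab/merges paths
# are assumed placeholders, and the sketch assumes this module's usual imports
# (a PreTrainedTokenizer base class and a regex-based `re`) are present above.
if __name__ == "__main__":
    demo_tokenizer = lowerCAmelCase(vocab_file="vocab.json", merges_file="merges.txt")
    demo_tokens = demo_tokenizer._tokenize("Hello world")
    print(demo_tokens, demo_tokenizer.convert_tokens_to_string(demo_tokens))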
| 24 |
'''simple docstring'''
def sum_of_digits(n: int) -> int:
    '''simple docstring'''
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    '''simple docstring'''
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    '''simple docstring'''
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    '''simple docstring'''
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f'''{func.__name__}({value})'''
        timing = timeit(f'''__main__.{call}''', setup='''import __main__''')
        print(f'''{call:56} = {func(value)} -- {timing:.4f} seconds''')

    for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
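    # Editor's illustrative check (added, not in the original file): the three
    # implementations must agree, including on negative inputs, since each
    # takes abs() first.
    for sample in (0, 7, -48, 12_345):
        assert sum_of_digits(sample) == sum_of_digits_recursion(sample) == sum_of_digits_compact(sample)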
| 24 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    '''simple docstring'''
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    '''simple docstring'''
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    '''simple docstring'''
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    '''simple docstring'''
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    '''simple docstring'''
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Node | None]:
    '''simple docstring'''
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    '''simple docstring'''
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    '''simple docstring'''
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    '''simple docstring'''
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    '''simple docstring'''
    root = make_tree()
    print(f'''In-order Traversal: {inorder(root)}''')
    print(f'''Pre-order Traversal: {preorder(root)}''')
    print(f'''Post-order Traversal: {postorder(root)}''', '''\n''')
    print(f'''Height of Tree: {height(root)}''', '''\n''')
    print('''Complete Level Order Traversal: ''')
    print(level_order(root), '''\n''')
    print('''Level-wise order Traversal: ''')
    for level in range(1, height(root) + 1):
        print(f'''Level {level}:''', get_nodes_from_left_to_right(root, level=level))
    print('''\nZigZag order Traversal: ''')
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
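    # Editor's illustrative check (added): on the sample tree, zigzag order
    # alternates direction level by level.
    assert zigzag(make_tree()) == [[1], [3, 2], [4, 5]]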
| 24 |
'''simple docstring'''
def infix_2_postfix(infix):
    '''simple docstring'''
    stack = []
    post_fix = []
    priority = {
        '''^''': 3,
        '''*''': 2,
        '''/''': 2,
        '''%''': 2,
        '''+''': 1,
        '''-''': 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        '''Symbol'''.center(8), '''Stack'''.center(print_width), '''Postfix'''.center(print_width), sep=''' | ''', )
    print('''-''' * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8), (''''''.join(stack)).ljust(print_width), (''''''.join(post_fix)).ljust(print_width), sep=''' | ''', )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            ''' '''.center(8), (''''''.join(stack)).ljust(print_width), (''''''.join(post_fix)).ljust(print_width), sep=''' | ''', )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix(infix):
    '''simple docstring'''
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ''')'''  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = '''('''  # change ")" to "("
    return (infix_2_postfix(''''''.join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input('''\nEnter an Infix Equation = ''')  # Input an Infix equation
    Infix = ''''''.join(Infix.split())  # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
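    # Editor's illustrative check (added; it prints its own conversion tables
    # as a side effect): the prefix form of d+e*f is +d*ef.
    assert infix_2_prefix("d+e*f") == "+d*ef"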
| 24 | 1 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def get_module_path(test_file):
    '''simple docstring'''
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
            f'''{test_file} instead.''')
    test_fn = components[-1]
    if not test_fn.endswith('''py'''):
        raise ValueError(f'''`test_file` should be a python file. Got {test_fn} instead.''')
    if not test_fn.startswith('''test_modeling_'''):
        raise ValueError(
            f'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''')
    components = components[:-1] + [test_fn.replace('''.py''', '''''')]
    test_module_path = '''.'''.join(components)
    return test_module_path
def get_test_module(test_file):
    '''simple docstring'''
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    '''simple docstring'''
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith('''ModelTester'''):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    '''simple docstring'''
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, '''all_model_classes''', [])
        if len(model_classes) > 0:
            test_classes.append(test_class)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    '''simple docstring'''
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    '''simple docstring'''
    test = test_class()
    if hasattr(test, '''setUp'''):
        test.setUp()
    model_tester = None
    if hasattr(test, '''model_tester'''):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file, model_class):
    '''simple docstring'''
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file, model_class):
    '''simple docstring'''
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    '''simple docstring'''
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    '''simple docstring'''
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    '''simple docstring'''
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    '''simple docstring'''
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
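
# Editor's illustrative sketch (added): mapping one test file to its module
# path. The path below is an assumed example from a transformers checkout,
# not a file bundled with this snippet.
if __name__ == "__main__":
    example = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
    print(get_module_path(example))  # -> tests.models.bert.test_modeling_bert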
| 24 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = '''swin'''
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ['''stem'''] + [F'''stage{idx}''' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''')
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1E-4
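
# Editor's illustrative sketch (added): instantiating the config with defaults
# and inspecting the derived channel dimension, embed_dim * 2 ** (num_stages - 1).
if __name__ == "__main__":
    cfg = SwinConfig()
    print(cfg.hidden_size)  # 96 * 2**3 == 768 for the default depths [2, 2, 6, 2]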
| 24 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFConvBertModel(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': TFConvBertModel,
            '''fill-mask''': TFConvBertForMaskedLM,
            '''question-answering''': TFConvBertForQuestionAnswering,
            '''text-classification''': TFConvBertForSequenceClassification,
            '''token-classification''': TFConvBertForTokenClassification,
            '''zero-shot''': TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, '''use_cache'''):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, '''encoder_seq_length''', self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, '''key_length''', encoder_seq_length)
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, '''saved_model''', '''1''')
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                if self.is_encoder_decoder:
                    output_hidden_states = outputs['''encoder_hidden_states''']
                    output_attentions = outputs['''encoder_attentions''']
                else:
                    output_hidden_states = outputs['''hidden_states''']
                    output_attentions = outputs['''attentions''']
                self.assertEqual(len(outputs), num_out)
                expected_num_layers = getattr(
                    self.model_tester, '''expected_num_hidden_layers''', self.model_tester.num_hidden_layers + 1)
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], )
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        model = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''')
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, '''decoder_seq_length''', self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, '''encoder_seq_length''', self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, '''key_length''', decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, '''key_length''', encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length], )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )

        for model_class in self.all_model_classes:
            inputs_dict['''output_attentions'''] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict['''output_attentions'''] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        '''simple docstring'''
        model = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1E-4)
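
# Editor's note (illustrative, not part of the original file): these tests are
# meant to be collected by pytest from a transformers checkout, e.g.
#   python -m pytest tests/models/convbert/test_modeling_tf_convbert.py
# The path is the assumed upstream location of this test module.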
| 24 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    '''simple docstring'''
    t = int(t)
    h, m, s = t // 36_00, (t // 60) % 60, t % 60
    return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
def html_progress_bar(value, total, prefix, label, width=3_00):
    '''simple docstring'''
    return f'''
    <div>
      {prefix}
      <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
      {label}
    </div>
    '''
def text_to_html_table(items):
    '''simple docstring'''
    html_code = '''<table border="1" class="dataframe">\n'''
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f'''      <th>{i}</th>\n'''
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f'''{elt:.6f}''' if isinstance(elt, float) else str(elt)
            html_code += f'''      <td>{elt}</td>\n'''
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2
    def __init__(self, total, prefix=None, leave=True, parent=None, width=300):
        '''simple docstring'''
        self.total = total
        self.prefix = '''''' if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update(self, value, force_update=False, comment=None):
        '''simple docstring'''
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
    def update_bar(self, value, comment=None):
        '''simple docstring'''
        spaced_value = ''' ''' * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = F'''[{spaced_value}/{self.total} : < :'''
        elif self.predicted_remaining is None:
            self.label = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)}'''
        else:
            self.label = (
                F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <'''
                F''' {format_time(self.predicted_remaining)}'''
            )
            self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
        self.label += "]" if self.comment is None or len(self.comment) == 0 else F''', {self.comment}]'''
        self.display()
    def display(self):
        '''simple docstring'''
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        '''simple docstring'''
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(''''''))
class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        '''simple docstring'''
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None
    def display(self):
        '''simple docstring'''
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        '''simple docstring'''
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        '''simple docstring'''
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        '''simple docstring'''
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        '''simple docstring'''
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        '''simple docstring'''
        self.first_column = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ['''Training Loss''']
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append('''Validation Loss''')
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)
    def on_step_end(self, args, state, control, **kwargs):
        '''simple docstring'''
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else F'''{state.epoch:.2f}'''
        self.training_tracker.update(
            state.global_step + 1, comment=F'''Epoch {epoch}/{state.num_train_epochs}''', force_update=self._force_next_update, )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        '''simple docstring'''
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        '''simple docstring'''
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None
    def on_log(self, args, state, control, logs=None, **kwargs):
        '''simple docstring'''
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
            values['''Step'''] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        '''simple docstring'''
        if self.training_tracker is not None:
            values = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values['''Training Loss'''] = log['''loss''']
                    break
            if self.first_column == "Epoch":
                values['''Epoch'''] = int(state.epoch)
            else:
                values['''Step'''] = state.global_step
            metric_key_prefix = '''eval'''
            for k in metrics:
                if k.endswith('''_loss'''):
                    metric_key_prefix = re.sub(r'''\_loss$''', '''''', k)
            _ = metrics.pop('''total_flos''', None)
            _ = metrics.pop('''epoch''', None)
            _ = metrics.pop(F'''{metric_key_prefix}_runtime''', None)
            _ = metrics.pop(F'''{metric_key_prefix}_samples_per_second''', None)
            _ = metrics.pop(F'''{metric_key_prefix}_steps_per_second''', None)
            _ = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''', None)
            for k, v in metrics.items():
                if k == F'''{metric_key_prefix}_loss''':
                    values['''Validation Loss'''] = v
                else:
                    splits = k.split('''_''')
                    name = ''' '''.join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        '''simple docstring'''
        self.training_tracker.update(
            state.global_step, comment=F'''Epoch {int(state.epoch)}/{state.num_train_epochs}''', force_update=True)
        self.training_tracker = None
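
# Editor's illustrative sketch (added): wiring the callback into a Trainer.
# The model, args, and dataset below are assumed placeholders.
#
#   from transformers import Trainer
#   trainer = Trainer(model=model, args=training_args, train_dataset=train_ds,
#                     callbacks=[NotebookProgressCallback()])
#   trainer.train()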
| 24 | 1 |
'''simple docstring'''
def kinetic_energy(mass: float, velocity: float) -> float:
    '''simple docstring'''
    if mass < 0:
        raise ValueError('''The mass of a body cannot be negative''')
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
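    # Editor's illustrative example (added): a 10 kg mass at 10 m/s carries
    # 0.5 * 10 * 10**2 == 500 J of kinetic energy.
    print(kinetic_energy(10, 10))  # 500.0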
| 24 |
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    '''simple docstring'''
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_00_00_00) -> int:
    '''simple docstring'''
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
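    # Editor's illustrative check (added): R(6) = 111111 = 7 * 15873, so the
    # repunit order of 7 is 6.
    assert least_divisible_repunit(7) == 6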
| 24 | 1 |
'''simple docstring'''
class Graph:
    def __init__(self):
        '''simple docstring'''
        self.vertex = {}

    def print_graph(self) -> None:
        '''simple docstring'''
        print(self.vertex)
        for i in self.vertex:
            print(i, ''' -> ''', ''' -> '''.join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex) -> None:
        '''simple docstring'''
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        '''simple docstring'''
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited) -> None:
        '''simple docstring'''
        visited[start_vertex] = True
        print(start_vertex, end=''' ''')
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('''DFS:''')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
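
    # Editor's illustrative check (added): the sample graph above has exactly
    # four vertices as adjacency-list keys.
    assert len(g.vertex) == 4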
| 24 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    '''simple docstring'''
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f'''SPARK_PARTITION_ID() = {part_id}''').collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''').appName('''pyspark''').getOrCreate()
    df = spark.range(1_00).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''').appName('''pyspark''').getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''').appName('''pyspark''').getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f'''0_{i}'''
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''').appName('''pyspark''').getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch('''numpy.random.Generator''') as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''').appName('''pyspark''').getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''').appName('''pyspark''').getOrCreate()
    df = spark.range(1_00).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 1_00
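
# Editor's note (illustrative, not part of the original file): these tests
# require a local pyspark install and are typically run via pytest, e.g.
#   python -m pytest tests/packaged_modules/test_spark.py
# The path is the assumed upstream location of this module in the datasets repo.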
| 24 | 1 |
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    '''simple docstring'''
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[floataa], location: NDArray[floataa], eps: float = 10**-1) -> bool:
    '''simple docstring'''
    moments: NDArray[floataa] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 1_8_0 - 3_0),
            polar_force(879.54, 4_5),
            polar_force(1_0_0, -9_0),
        ]
    )
    location: NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(3_0 * 9.81, 1_5),
            polar_force(2_1_5, 1_8_0 - 4_5),
            polar_force(2_6_4, 9_0 - 3_0),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]])
    location = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]])
    assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
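
    # Illustrative extra check (a sketch, not from the original problem set):
    # equal and opposite forces applied at the same point produce zero net
    # moment, so the system is trivially in equilibrium.
    balanced_forces = array([polar_force(10.0, 45), polar_force(10.0, 45 + 180)])
    same_point = array([[0, 0], [0, 0]])
    assert in_static_equilibrium(balanced_forces, same_point)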
| 24 |
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """
    Sum of the first `num_of_terms` terms of an arithmetic series.

    >>> sum_of_series(1, 1, 10)
    55.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
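
    # Illustrative check (sketch): the closed-form result matches a brute-force
    # sum for 1 + 2 + ... + 10 (both equal 55).
    assert sum_of_series(1, 1, 10) == sum(range(1, 11))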
| 24 | 1 |
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
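

# Illustrative wiring sketch (hypothetical variable names; the surrounding
# training script is assumed to have built the model, datasets, tokenizer,
# collator, and the post-processing/metric functions):
#
# trainer = QuestionAnsweringSeq2SeqTrainer(
#     model=model,
#     args=training_args,
#     train_dataset=train_dataset,
#     eval_dataset=eval_dataset,
#     eval_examples=eval_examples,
#     tokenizer=tokenizer,
#     data_collator=data_collator,
#     compute_metrics=compute_metrics,
#     post_process_function=post_processing_function,
# )
# metrics = trainer.evaluate(max_length=128, num_beams=4, metric_key_prefix="eval")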
| 24 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
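

# Example usage (sketch): apply several requirement decorators to every test
# method of a case at once. The test class and method names are hypothetical.
#
# @for_all_test_methods(require_torch, require_regex)
# class TokenizationTests(unittest.TestCase):
#     def test_tokenize(self):
#         ...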
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
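

# Example usage (sketch): code run inside the context sees HTTP requests fail,
# so tests can assert offline behavior without touching the network config.
#
# with offline(OfflineSimulationMode.CONNECTION_FAILS):
#     requests.get("https://huggingface.co")  # expected to raise ConnectionError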
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    """Numerical id of the current pytest-xdist worker, e.g. `gw2` -> 2; 0 without xdist."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """A master port offset by the xdist worker id, so concurrent workers don't collide."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
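

# Illustrative use (sketch; the command and script name are hypothetical):
#
# port = get_torch_dist_unique_port()
# cmd = f"python -m torch.distributed.run --nproc_per_node=2 --master_port={port} test_ddp.py".split()
# result = execute_subprocess_async(cmd, env=os.environ.copy())
# assert result.returncode == 0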
| 24 | 1 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
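

# Example usage (sketch; tokenizer files are fetched from the Hub):
#
# tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
# enc = tokenizer(["a long document", "short"], padding=True)
# # Mark global attention on the first token of each sequence; `_pad` above
# # extends `global_attention_mask` with -1 when lengths differ.
# enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]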
| 24 |
def partition(m: int) -> int:
    """Count the integer partitions of `m` with dynamic programming."""
    memo: list = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
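
# Illustrative values (sketch, assuming the standard partition-number sequence):
# partition(1) == 1, partition(2) == 2, partition(3) == 3,
# partition(4) == 5, partition(5) == 7, partition(7) == 15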
| 24 | 1 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 24 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
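
# Illustrative input (sketch): a mapping this script re-sorts. Entries are
# ordered by their quoted identifier, so "albert" precedes "bert" after sorting:
#
#     MODEL_MAPPING_NAMES = OrderedDict(
#         [
#             ("albert", "AlbertModel"),
#             ("bert", "BertModel"),
#         ]
#     )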
| 24 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False
@property
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
return 32
@property
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
return 32
@property
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
return 100
@property
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__SCREAMING_SNAKE_CASE )
@property
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(__SCREAMING_SNAKE_CASE )
@property
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
        model_kwargs = {
'''clip_embeddings_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''cross_attention_dim''': self.cross_attention_dim,
}
        model = UnCLIPTextProjModel(**model_kwargs)
return model
@property
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
        model_kwargs = {
'''sample_size''': 32,
# RGB in channels
'''in_channels''': 3,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 6,
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': '''identity''',
}
        model = UNet2DConditionModel(**model_kwargs)
return model
@property
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
        model = UNet2DModel(**self.dummy_super_res_kwargs)
return model
@property
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
torch.manual_seed(1 )
        model = UNet2DModel(**self.dummy_super_res_kwargs)
return model
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case = self.dummy_decoder
__snake_case = self.dummy_text_proj
__snake_case = self.dummy_text_encoder
__snake_case = self.dummy_tokenizer
__snake_case = self.dummy_super_res_first
__snake_case = self.dummy_super_res_last
__snake_case = UnCLIPScheduler(
variance_type='''learned_range''' , prediction_type='''epsilon''' , num_train_timesteps=1000 , )
__snake_case = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''epsilon''' , num_train_timesteps=1000 , )
__snake_case = CLIPImageProcessor(crop_size=32 , size=32 )
__snake_case = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=True ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
__snake_case = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__snake_case = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
if pil_image:
__snake_case = input_image * 0.5 + 0.5
__snake_case = input_image.clamp(0 , 1 )
__snake_case = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__snake_case = DiffusionPipeline.numpy_to_pil(__SCREAMING_SNAKE_CASE )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = '''cpu'''
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
__snake_case = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE , pil_image=__SCREAMING_SNAKE_CASE )
__snake_case = pipe(**__SCREAMING_SNAKE_CASE )
__snake_case = output.images
__snake_case = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE , pil_image=__SCREAMING_SNAKE_CASE )
__snake_case = pipe(
**__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , )[0]
__snake_case = image[0, -3:, -3:, -1]
__snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__snake_case = np.array(
[
0.9_997,
0.0_002,
0.9_997,
0.9_997,
0.9_969,
0.0_023,
0.9_997,
0.9_969,
0.9_970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = '''cpu'''
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
__snake_case = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE , pil_image=__SCREAMING_SNAKE_CASE )
__snake_case = pipe(**__SCREAMING_SNAKE_CASE )
__snake_case = output.images
__snake_case = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE , pil_image=__SCREAMING_SNAKE_CASE )
__snake_case = pipe(
**__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , )[0]
__snake_case = image[0, -3:, -3:, -1]
__snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__snake_case = np.array([0.9_997, 0.0_003, 0.9_997, 0.9_997, 0.9_970, 0.0_024, 0.9_997, 0.9_971, 0.9_971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = '''cpu'''
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
__snake_case = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE , pil_image=__SCREAMING_SNAKE_CASE )
__snake_case = [
pipeline_inputs['''image'''],
pipeline_inputs['''image'''],
]
__snake_case = pipe(**__SCREAMING_SNAKE_CASE )
__snake_case = output.images
__snake_case = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE , pil_image=__SCREAMING_SNAKE_CASE )
__snake_case = [
tuple_pipeline_inputs['''image'''],
tuple_pipeline_inputs['''image'''],
]
__snake_case = pipe(
**__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , )[0]
__snake_case = image[0, -3:, -3:, -1]
__snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
__snake_case = np.array(
[
0.9_997,
0.9_989,
0.0_008,
0.0_021,
0.9_960,
0.0_018,
0.0_014,
0.0_002,
0.9_933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
__snake_case = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
__snake_case = pipe.decoder.dtype
__snake_case = 1
__snake_case = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
__snake_case = pipe.prepare_latents(
__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , scheduler=DummyScheduler() )
__snake_case = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
__snake_case = pipe.prepare_latents(
__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , scheduler=DummyScheduler() )
__snake_case = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE , pil_image=__SCREAMING_SNAKE_CASE )
__snake_case = pipe(
**__SCREAMING_SNAKE_CASE , decoder_latents=__SCREAMING_SNAKE_CASE , super_res_latents=__SCREAMING_SNAKE_CASE ).images
__snake_case = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE , pil_image=__SCREAMING_SNAKE_CASE )
# Don't pass image, instead pass embedding
__snake_case = pipeline_inputs.pop('''image''' )
__snake_case = pipe.image_encoder(__SCREAMING_SNAKE_CASE ).image_embeds
__snake_case = pipe(
**__SCREAMING_SNAKE_CASE , decoder_latents=__SCREAMING_SNAKE_CASE , super_res_latents=__SCREAMING_SNAKE_CASE , image_embeddings=__SCREAMING_SNAKE_CASE , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = torch_device == '''cpu'''
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
__snake_case = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=__SCREAMING_SNAKE_CASE , expected_max_diff=__SCREAMING_SNAKE_CASE )
@skip_mps
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = torch_device == '''cpu'''
__snake_case = True
__snake_case = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
self._test_inference_batch_single_identical(
test_max_difference=__SCREAMING_SNAKE_CASE , relax_max_difference=__SCREAMING_SNAKE_CASE , additional_params_copy_to_batched_inputs=__SCREAMING_SNAKE_CASE , )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
__snake_case = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=__SCREAMING_SNAKE_CASE , additional_params_copy_to_batched_inputs=__SCREAMING_SNAKE_CASE , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=__SCREAMING_SNAKE_CASE )
@skip_mps
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''' )
__snake_case = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''' )
        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
__snake_case = pipeline.to(__SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case = pipeline(
__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , output_type='''np''' , )
__snake_case = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 15 )
| 24 |
"""Deprecation helper: warn about (and pop) deprecated arguments or attributes."""
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
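

# Example usage (sketch; the argument name and removal version are illustrative):
#
# def __call__(self, *args, **kwargs):
#     scale = deprecate("scale", "0.99.0", "Use `cross_attention_scale` instead.", take_from=kwargs)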
| 24 | 1 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)

            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
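    # Rebuild the state dict under the new key names; iterating over a copy lets
    # us pop entries from the original dict while looping.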
for key in checkpoint.copy().keys():
__snake_case = checkpoint.pop(_lowerCamelCase )
__snake_case = val
return checkpoint
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__snake_case = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return image
def _UpperCamelCase (_lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : bool )-> Optional[Any]:
'''simple docstring'''
__snake_case = torch.load(_lowerCamelCase , map_location='''cpu''' )['''model''']
__snake_case = EfficientFormerConfig.from_json_file(_lowerCamelCase )
__snake_case = EfficientFormerForImageClassificationWithTeacher(_lowerCamelCase )
__snake_case = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
__snake_case = config.depths[-1] - config.num_metaad_blocks + 1
__snake_case = convert_torch_checkpoint(_lowerCamelCase , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
__snake_case = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
__snake_case = prepare_img()
__snake_case = 2_56
__snake_case = 2_24
__snake_case = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values
# original processing pipeline
__snake_case = Compose(
[
Resize(_lowerCamelCase , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(_lowerCamelCase ),
ToTensor(),
Normalize(_lowerCamelCase , _lowerCamelCase ),
] )
__snake_case = image_transforms(_lowerCamelCase ).unsqueeze(0 )
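    # The HF image processor must reproduce the reference torchvision pipeline
    # exactly before the logits comparison below means anything.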
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
__snake_case = model(_lowerCamelCase )
__snake_case = outputs.logits
__snake_case = (1, 10_00)
if "l1" in model_name:
__snake_case = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
__snake_case = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
__snake_case = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
            f'''Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7''' )
# Save Checkpoints
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
    print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
    processor.save_pretrained(_lowerCamelCase )
    print(f'''Processor successfully saved at {pytorch_dump_path}''' )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=_lowerCamelCase , )
processor.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
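    # Example invocation (script and file names are illustrative placeholders,
    # not the real checkpoints):
    #   python convert_efficientformer_checkpoint.py \
    #       --pytorch_model_path efficientformer_l1.pth \
    #       --config_file efficientformer_l1_config.json \
    #       --pytorch_dump_path efficientformer-l1 \
    #       --no-push_to_hub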
| 24 | 1 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCAmelCase_ : Optional[int] = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
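# the two hooks below delegate to diffusers' shared pytest helpers so this suite
# exposes the same extra CLI options and report files as the other HF test suites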
def _UpperCamelCase (_lowerCamelCase : Any )-> int:
'''simple docstring'''
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowerCamelCase )
def _UpperCamelCase (_lowerCamelCase : int )-> Tuple:
'''simple docstring'''
from diffusers.utils.testing_utils import pytest_terminal_summary_main
__snake_case = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(_lowerCamelCase , id=_lowerCamelCase )
| 24 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase ( unittest.TestCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=18 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , ) -> Tuple:
'''simple docstring'''
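        # lightweight config holder mirroring MobileViTImageProcessor's kwargs so
        # the tests below can build processors and expected shapes in one place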
__snake_case = size if size is not None else {'''shortest_edge''': 20}
__snake_case = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__snake_case = parent
__snake_case = batch_size
__snake_case = num_channels
__snake_case = image_size
__snake_case = min_resolution
__snake_case = max_resolution
__snake_case = do_resize
__snake_case = size
__snake_case = do_center_crop
__snake_case = crop_size
__snake_case = do_flip_channel_order
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Union[str, Any] = MobileViTImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = MobileViTImageProcessingTester(self )
@property
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_center_crop''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''center_crop''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_flip_channel_order''' ) )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 24 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Dict = VideoToVideoSDPipeline
__lowercase : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'''video'''}) - {'''image''', '''width''', '''height'''}
__lowercase : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''video'''}) - {'''image'''}
__lowercase : Optional[int] = PipelineTesterMixin.required_optional_params - {'''latents'''}
__lowercase : Any = False
# No `output_type`.
__lowercase : Union[str, Any] = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
])
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , )
__snake_case = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
__snake_case = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
__snake_case = CLIPTextModel(__SCREAMING_SNAKE_CASE )
__snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__snake_case = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> Union[str, Any]:
'''simple docstring'''
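        # tiny random 5-D video tensor keeps this CPU smoke test fast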
__snake_case = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
__snake_case = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__snake_case = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__snake_case = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''video''': video,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__snake_case = self.get_dummy_components()
__snake_case = VideoToVideoSDPipeline(**__SCREAMING_SNAKE_CASE )
__snake_case = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__snake_case = '''np'''
__snake_case = sd_pipe(**__SCREAMING_SNAKE_CASE ).frames
__snake_case = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
__snake_case = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE , expected_max_diff=5E-3 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
pass
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class lowerCAmelCase ( unittest.TestCase):
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = VideoToVideoSDPipeline.from_pretrained('''cerspense/zeroscope_v2_XL''' , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
__snake_case = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case = torch.randn((1, 10, 3, 1024, 576) , generator=__SCREAMING_SNAKE_CASE )
__snake_case = video.to('''cuda''' )
__snake_case = '''Spiderman is surfing'''
__snake_case = pipe(__SCREAMING_SNAKE_CASE , video=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=3 , output_type='''pt''' ).frames
__snake_case = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 24 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = "arrow" , **__SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
split=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__snake_case = load_from_cache_file
__snake_case = file_format
__snake_case = Spark(
df=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , working_dir=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
__snake_case = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__SCREAMING_SNAKE_CASE , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
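# Minimal usage sketch (hypothetical names -- in the unobfuscated source this
# class is datasets' SparkDatasetReader and the method above is `read`):
#
#   spark = pyspark.sql.SparkSession.builder.master("local[*]").getOrCreate()
#   df = spark.range(100)
#   ds = SparkDatasetReader(df, cache_dir="/tmp/hf_cache").read()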
| 24 | 1 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : int = BarthezTokenizer
__lowercase : Any = BarthezTokenizerFast
__lowercase : Dict = True
__lowercase : Optional[int] = True
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
super().setUp()
__snake_case = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__SCREAMING_SNAKE_CASE )
__snake_case = tokenizer
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = '''<pad>'''
__snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 10_1122 )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__snake_case = [0, 57, 3018, 7_0307, 91, 2]
__snake_case = self.tokenizer(
__SCREAMING_SNAKE_CASE , max_length=len(__SCREAMING_SNAKE_CASE ) , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer()
__snake_case = '''I was born in 92000, and this is falsé.'''
__snake_case = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = self.get_rust_tokenizer()
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
__snake_case = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=__SCREAMING_SNAKE_CASE , )
| 24 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : str = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase_ : str = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCAmelCase_ : Union[str, Any] = {
'''allenai/led-base-16384''': 1_6_3_8_4,
}
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Dict = VOCAB_FILES_NAMES
__lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Union[str, Any] = LEDTokenizer
__lowercase : int = ['''input_ids''', '''attention_mask''']
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="replace" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ) -> List[Any]:
'''simple docstring'''
super().__init__(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , errors=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
__snake_case = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) )
__snake_case = add_prefix_space
__snake_case = pre_tok_class(**__SCREAMING_SNAKE_CASE )
__snake_case = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__snake_case = '''post_processor'''
__snake_case = getattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if tokenizer_component_instance:
__snake_case = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
__snake_case = tuple(state['''sep'''] )
if "cls" in state:
__snake_case = tuple(state['''cls'''] )
__snake_case = False
if state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
__snake_case = add_prefix_space
__snake_case = True
if state.get('''trim_offsets''' , __SCREAMING_SNAKE_CASE ) != trim_offsets:
__snake_case = trim_offsets
__snake_case = True
if changes_to_apply:
__snake_case = getattr(__SCREAMING_SNAKE_CASE , state.pop('''type''' ) )
__snake_case = component_class(**__SCREAMING_SNAKE_CASE )
setattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else value
__snake_case = value
def lowerCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> BatchEncoding:
'''simple docstring'''
__snake_case = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> BatchEncoding:
'''simple docstring'''
__snake_case = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
'''simple docstring'''
__snake_case = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[Any]:
'''simple docstring'''
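        # single sequence: `<s> A </s>`; pair of sequences: `<s> A </s></s> B </s>`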
__snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = PaddingStrategy.DO_NOT_PAD , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ) -> dict:
'''simple docstring'''
__snake_case = super()._pad(
encoded_inputs=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding_strategy=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , )
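        # LED additionally carries a `global_attention_mask`; pad it below so it
        # stays aligned with the already-padded sequential inputs.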
# Load from model defaults
if return_attention_mask is None:
__snake_case = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__snake_case = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__snake_case = len(encoded_inputs['''global_attention_mask'''] ) != len(__SCREAMING_SNAKE_CASE )
if needs_to_be_padded:
__snake_case = len(__SCREAMING_SNAKE_CASE ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__snake_case = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
__snake_case = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 24 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class lowerCAmelCase ( unittest.TestCase):
__lowercase : Any = StableDiffusionLDMaDPipeline
__lowercase : Dict = TEXT_TO_IMAGE_PARAMS
__lowercase : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
__lowercase : int = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__snake_case = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
__snake_case = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__snake_case = CLIPTextModel(__SCREAMING_SNAKE_CASE )
__snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__snake_case = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> Union[str, Any]:
'''simple docstring'''
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
__snake_case = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__snake_case = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__snake_case = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__snake_case = self.get_dummy_components()
__snake_case = StableDiffusionLDMaDPipeline(**__SCREAMING_SNAKE_CASE )
__snake_case = ldmad_pipe.to(__SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__snake_case = ldmad_pipe(**__SCREAMING_SNAKE_CASE )
__snake_case , __snake_case = output.rgb, output.depth
__snake_case = rgb[0, -3:, -3:, -1]
__snake_case = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
__snake_case = np.array(
[0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262] )
__snake_case = np.array([103.46_727, 85.812_004, 87.849_236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = self.get_dummy_components()
__snake_case = StableDiffusionLDMaDPipeline(**__SCREAMING_SNAKE_CASE )
__snake_case = ldmad_pipe.to(__SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__snake_case = 3 * [inputs['''prompt''']]
# forward
__snake_case = ldmad_pipe(**__SCREAMING_SNAKE_CASE )
__snake_case , __snake_case = output.rgb, output.depth
__snake_case = rgb_slice_a[0, -3:, -3:, -1]
__snake_case = depth_slice_a[0, -3:, -1]
__snake_case = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__snake_case = 3 * [inputs.pop('''prompt''' )]
__snake_case = ldmad_pipe.tokenizer(
__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
__snake_case = text_inputs['''input_ids'''].to(__SCREAMING_SNAKE_CASE )
__snake_case = ldmad_pipe.text_encoder(__SCREAMING_SNAKE_CASE )[0]
__snake_case = prompt_embeds
# forward
__snake_case = ldmad_pipe(**__SCREAMING_SNAKE_CASE )
__snake_case , __snake_case = output.rgb, output.depth
__snake_case = rgb_slice_a[0, -3:, -3:, -1]
__snake_case = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__snake_case = self.get_dummy_components()
__snake_case = PNDMScheduler(skip_prk_steps=__SCREAMING_SNAKE_CASE )
__snake_case = StableDiffusionLDMaDPipeline(**__SCREAMING_SNAKE_CASE )
__snake_case = ldmad_pipe.to(__SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__snake_case = '''french fries'''
__snake_case = ldmad_pipe(**__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE )
__snake_case , __snake_case = output.rgb, output.depth
__snake_case = rgb[0, -3:, -3:, -1]
__snake_case = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
__snake_case = np.array(
[0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217] )
__snake_case = np.array([107.84_738, 84.62_802, 89.962_135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase):
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="cpu" , __SCREAMING_SNAKE_CASE=torch.floataa , __SCREAMING_SNAKE_CASE=0 ) -> List[Any]:
'''simple docstring'''
__snake_case = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__snake_case = np.random.RandomState(__SCREAMING_SNAKE_CASE ).standard_normal((1, 4, 64, 64) )
__snake_case = torch.from_numpy(__SCREAMING_SNAKE_CASE ).to(device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )
__snake_case = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' )
__snake_case = ldmad_pipe.to(__SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = self.get_inputs(__SCREAMING_SNAKE_CASE )
__snake_case = ldmad_pipe(**__SCREAMING_SNAKE_CASE )
__snake_case , __snake_case = output.rgb, output.depth
__snake_case = rgb[0, -3:, -3:, -1].flatten()
__snake_case = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
__snake_case = np.array(
[0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706] )
__snake_case = np.array(
[0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase):
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="cpu" , __SCREAMING_SNAKE_CASE=torch.floataa , __SCREAMING_SNAKE_CASE=0 ) -> int:
'''simple docstring'''
__snake_case = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__snake_case = np.random.RandomState(__SCREAMING_SNAKE_CASE ).standard_normal((1, 4, 64, 64) )
__snake_case = torch.from_numpy(__SCREAMING_SNAKE_CASE ).to(device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )
__snake_case = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 50,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ).to(__SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = self.get_inputs(__SCREAMING_SNAKE_CASE )
__snake_case = ldmad_pipe(**__SCREAMING_SNAKE_CASE )
__snake_case , __snake_case = output.rgb, output.depth
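        # the nightly check pins summary statistics (mean/std) of the RGB and
        # depth outputs rather than exact pixel slices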
__snake_case = 0.495_586
__snake_case = 0.33_795_515
__snake_case = 112.48_518
__snake_case = 98.489_746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d-4c''' ).to(__SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = self.get_inputs(__SCREAMING_SNAKE_CASE )
__snake_case = ldmad_pipe(**__SCREAMING_SNAKE_CASE )
__snake_case , __snake_case = output.rgb, output.depth
__snake_case = 0.4_194_127
__snake_case = 0.35_375_586
__snake_case = 0.5_638_502
__snake_case = 0.34_686_103
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
| 24 |
'''simple docstring'''
from collections import deque
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> Optional[int]:
'''simple docstring'''
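    # Tarjan's algorithm: a single DFS gives every vertex a discovery index and a
    # low-link value (the smallest index reachable from its DFS subtree). A vertex
    # whose low-link equals its own index roots a strongly connected component,
    # which is then popped off the shared stack in one piece.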
__snake_case = len(_lowerCamelCase )
__snake_case = deque()
__snake_case = [False for _ in range(_lowerCamelCase )]
__snake_case = [-1 for _ in range(_lowerCamelCase )]
__snake_case = index_of[:]
def strong_connect(_lowerCamelCase : Any , _lowerCamelCase : Dict , _lowerCamelCase : List[str] ):
__snake_case = index # the number when this node is seen
__snake_case = index # lowest rank node reachable from here
index += 1
stack.append(_lowerCamelCase )
__snake_case = True
for w in g[v]:
if index_of[w] == -1:
__snake_case = strong_connect(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__snake_case = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
__snake_case = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
__snake_case = []
__snake_case = stack.pop()
__snake_case = False
component.append(_lowerCamelCase )
while w != v:
__snake_case = stack.pop()
__snake_case = False
component.append(_lowerCamelCase )
components.append(_lowerCamelCase )
return index
__snake_case = []
for v in range(_lowerCamelCase ):
if index_of[v] == -1:
strong_connect(_lowerCamelCase , 0 , _lowerCamelCase )
return components
def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
__snake_case = [[] for _ in range(_lowerCamelCase )]
for u, v in edges:
g[u].append(_lowerCamelCase )
return g
if __name__ == "__main__":
# Test
UpperCAmelCase_ : List[str] = 7
UpperCAmelCase_ : int = [0, 0, 1, 2, 3, 3, 4, 4, 6]
UpperCAmelCase_ : Dict = [1, 3, 2, 0, 1, 4, 5, 6, 5]
UpperCAmelCase_ : List[str] = [(u, v) for u, v in zip(source, target)]
UpperCAmelCase_ : Tuple = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 24 | 1 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] )-> Optional[Any]:
'''simple docstring'''
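    # collect rows one partition at a time so the expected order mirrors a given
    # partition traversal; ids are formatted as "<partition_id>_<row_idx>"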
__snake_case = []
for part_id in partition_order:
__snake_case = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(_lowerCamelCase ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Any:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(2 )
__snake_case = [1, 0]
__snake_case = _generate_iterable_examples(_lowerCamelCase , _lowerCamelCase ) # Reverse the partitions.
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , _lowerCamelCase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> int:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(1 )
__snake_case = SparkExamplesIterable(_lowerCamelCase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Union[str, Any]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
__snake_case = lambda _lowerCamelCase : x.reverse()
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [2, 1, 0] )
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shuffle_data_sources(_lowerCamelCase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [0, 2] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [1, 3] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Optional[int]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 24 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : int = BarthezTokenizer
__lowercase : Any = BarthezTokenizerFast
__lowercase : Dict = True
__lowercase : Optional[int] = True
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
super().setUp()
__snake_case = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__SCREAMING_SNAKE_CASE )
__snake_case = tokenizer
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = '''<pad>'''
__snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 10_1122 )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__snake_case = [0, 57, 3018, 7_0307, 91, 2]
__snake_case = self.tokenizer(
__SCREAMING_SNAKE_CASE , max_length=len(__SCREAMING_SNAKE_CASE ) , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer()
__snake_case = '''I was born in 92000, and this is falsé.'''
__snake_case = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = self.get_rust_tokenizer()
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
__snake_case = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=__SCREAMING_SNAKE_CASE , )
| 24 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowerCAmelCase ( unittest.TestCase):
    def setUp( self ) -> int:
'''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        image_processor_map = {
            '''do_resize''': True,
            '''size''': {'''height''': 18, '''width''': 18},
            '''do_normalize''': True,
            '''image_mean''': [0.5, 0.5, 0.5],
            '''image_std''': [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ) -> int:
        '''simple docstring'''
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ) -> Tuple:
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ) -> str:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ) -> Tuple:
'''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='''np''' )
        input_processor = processor(images=image_input , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
        with self.assertRaises(ValueError ):
processor()
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
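# Editor's note: the processor under test, used outside unittest — a sketch with an
# assumed BERT checkpoint and an in-memory PIL image:
#   from transformers import BertTokenizerFast, ViTImageProcessor, VisionTextDualEncoderProcessor
#   processor = VisionTextDualEncoderProcessor(
#       image_processor=ViTImageProcessor(), tokenizer=BertTokenizerFast.from_pretrained("bert-base-uncased"))
#   batch = processor(text=["lower newer"], images=[pil_image], return_tensors="pt")
#   # batch carries input_ids / token_type_ids / attention_mask / pixel_values, as asserted above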
| 24 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> Any:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ) -> Union[str, Any]:
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ) -> int:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_flax
class lowerCAmelCase ( FlaxModelTesterMixin , unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ) -> int:
'''simple docstring'''
        self.model_tester = FlaxRoFormerModelTester(self )
@slow
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class lowerCAmelCase ( unittest.TestCase):
@slow
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
        model = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        vocab_size = 5_0000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = jnp.array(
            [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
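# Editor's note: the integration check above in plain form (checkpoint name taken
# from the test itself):
#   model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
#   logits = model(jnp.array([[0, 1, 2, 3, 4, 5]]))[0]   # expected shape (1, 6, 50000)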
| 24 | 1 |
'''simple docstring'''
def partition(m : int )-> int:
    '''simple docstring'''
    memo = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
    import sys
    if len(sys.argv) == 1:
        try:
            n = int(input('''Enter a number: ''').strip())
            print(partition(n))
        except ValueError:
            print('''Please enter a number.''')
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print('''Please pass a number.''')
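# Editor's note: quick sanity checks for the DP above — these are the standard
# integer-partition numbers p(m):
#   partition(5) == 7, partition(7) == 15, partition(100) == 190569292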
| 24 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    '''simple docstring'''
    url = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
    image = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    return image
def create_rename_keys(config ):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias(state_dict , config ):
    '''simple docstring'''
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[f'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
def get_blipa_config(model_name , eos_token_id=None ):
    '''simple docstring'''
    image_size = 3_64 if '''coco''' in model_name else 2_24
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    '''simple docstring'''
    tokenizer = (
        AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
        if '''opt''' in model_name
        else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
    )
    eos_token_id = tokenizer('''\n''' , add_special_tokens=False ).input_ids[0]
    config , image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )
    hf_model = BlipaForConditionalGeneration(config ).eval()
    model_name_to_original = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
    name , type = model_name_to_original[model_name]
    # load original model
    print('''Loading original model...''' )
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    original_model , vis_processors , txt_processors = load_model_and_preprocess(
        name=name , model_type=type , is_eval=True , device=device )
    original_model.eval()
    print('''Done!''' )
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith('''Qformer.bert''' ):
            key = key.replace('''Qformer.bert''' , '''qformer''' )
        if "attention.self" in key:
            key = key.replace('''self''' , '''attention''' )
        if "opt_proj" in key:
            key = key.replace('''opt_proj''' , '''language_projection''' )
        if "t5_proj" in key:
            key = key.replace('''t5_proj''' , '''language_projection''' )
        if key.startswith('''opt''' ):
            key = key.replace('''opt''' , '''language''' )
        if key.startswith('''t5''' ):
            key = key.replace('''t5''' , '''language''' )
        state_dict[key] = val
# read in qv biases
    read_in_q_v_bias(state_dict , config )
    missing_keys , unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors['''eval'''](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(device )
    # create processor
    image_processor = BlipImageProcessor(
        size={'''height''': image_size, '''width''': image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
    processor = BlipaProcessor(image_processor=image_processor , tokenizer=tokenizer )
    pixel_values = processor(images=image , return_tensors='''pt''' ).pixel_values.to(device )
    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values , pixel_values )
    original_model.to(device )
    hf_model.to(device )
with torch.no_grad():
if "opt" in model_name:
            original_logits = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
            logits = hf_model(pixel_values , input_ids ).logits
        else:
            original_logits = original_model(
                {'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
            logits = hf_model(pixel_values , input_ids , labels=labels ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1E-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=device )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype ) , logits , atol=1E-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
    prompt = ''''''
    input_ids = tokenizer(prompt , return_tensors='''pt''' ).input_ids.to(device )
    original_outputs = original_model.generate({'''image''': original_pixel_values} )
    outputs = hf_model.generate(
        pixel_values , input_ids , do_sample=False , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print('''Original generation:''' , original_outputs )
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=True )
    output_text = [text.strip() for text in output_text]
    print('''HF generation:''' , output_text )
if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
    choices = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
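# Editor's note: example invocation (script filename and output path are assumptions):
#   python convert_blip_2_original_to_pytorch.py --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b-converted --push_to_hub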
| 24 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class lowerCAmelCase ( BackboneConfigMixin , PretrainedConfig):
    model_type = '''resnet'''
    layer_types = ['''basic''', '''bottleneck''']
    def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[256, 512, 1024, 2048] , depths=[3, 4, 6, 3] , layer_type="bottleneck" , hidden_act="relu" , downsample_in_first_stage=False , out_features=None , out_indices=None , **kwargs , ) -> Union[str, Any]:
        '''simple docstring'''
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class lowerCAmelCase ( OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''')
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase ( self ) -> float:
'''simple docstring'''
return 1E-3
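# Editor's note: a minimal usage sketch for the config class above, via the public
# transformers names rather than the obfuscated ones in this dump:
#   from transformers import ResNetConfig, ResNetModel
#   config = ResNetConfig(layer_type="basic", depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512])
#   model = ResNetModel(config)   # randomly initialised ResNet-18-style backbone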
| 24 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image(image : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
    '''simple docstring'''
    warnings.warn(
        '''The preprocess method is deprecated and will be removed in a future version. Please'''
        ''' use VaeImageProcessor.preprocess instead''' , FutureWarning , )
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        w , h = image[0].size
        w , h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def _preprocess_mask(mask : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
    '''simple docstring'''
    if isinstance(mask , torch.Tensor ):
        return mask
    elif isinstance(mask , PIL.Image.Image ):
        mask = [mask]
    if isinstance(mask[0] , PIL.Image.Image ):
        w , h = mask[0].size
        w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
        mask = np.concatenate(mask , axis=0 )
        mask = mask.astype(np.float32 ) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask )
    elif isinstance(mask[0] , torch.Tensor ):
        mask = torch.cat(mask , dim=0 )
    return mask
class lowerCAmelCase ( DiffusionPipeline):
    unet : UNetaDModel
    scheduler : RePaintScheduler
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 250 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
__snake_case = image
__snake_case = _preprocess_image(__SCREAMING_SNAKE_CASE )
__snake_case = original_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = _preprocess_mask(__SCREAMING_SNAKE_CASE )
__snake_case = mask_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(__SCREAMING_SNAKE_CASE )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
__snake_case = original_image.shape
__snake_case = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.device )
__snake_case = eta
__snake_case = self.scheduler.timesteps[0] + 1
__snake_case = generator[0] if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
__snake_case = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).sample
# compute previous image: x_t -> x_t-1
__snake_case = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__snake_case = self.scheduler.undo_step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = t
__snake_case = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
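# Editor's note: an illustrative inpainting call for the pipeline above (checkpoint
# name and input images are assumptions):
#   from diffusers import RePaintPipeline, RePaintScheduler
#   scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#   out = pipe(image=original_image, mask_image=mask_image, num_inference_steps=250,
#              eta=0.0, jump_length=10, jump_n_sample=10).images[0]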
| 24 | 1 |
'''simple docstring'''
import pprint
import requests
UpperCAmelCase_ : Dict = '''https://zenquotes.io/api'''
def quote_of_the_day()-> list:
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def random_quotes()-> list:
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
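# Editor's note: each endpoint returns a JSON list of quote objects; per the
# zenquotes.io docs the fields are roughly
#   [{"q": "<quote>", "a": "<author>", "h": "<html rendering>"}]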
| 24 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
UpperCAmelCase_ : str = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowerCAmelCase ( Pipeline):
def __init__( self , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self , images , **kwargs ) -> List[Any]:
        '''simple docstring'''
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ) -> Tuple:
        '''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['''candidate_labels'''] = kwargs['''candidate_labels''']
        if "hypothesis_template" in kwargs:
            preprocess_params['''hypothesis_template'''] = kwargs['''hypothesis_template''']
        return preprocess_params, {}, {}
    def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}." ) -> Optional[Any]:
        '''simple docstring'''
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs['''candidate_labels'''] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs['''text_inputs'''] = [text_inputs]
        return inputs
    def _forward( self , model_inputs ) -> Dict:
        '''simple docstring'''
        candidate_labels = model_inputs.pop('''candidate_labels''' )
        text_inputs = model_inputs.pop('''text_inputs''' )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            '''candidate_labels''': candidate_labels,
            '''logits''': outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self , model_outputs ) -> str:
        '''simple docstring'''
        candidate_labels = model_outputs.pop('''candidate_labels''' )
        logits = model_outputs['''logits'''][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''' )
        result = [
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
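# Editor's note: typical use of this pipeline via the high-level factory (model
# name is illustrative):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"],
#              hypothesis_template="This is a photo of {}.")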
| 24 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> Any:
        '''simple docstring'''
        size = size if size is not None else {'''shortest_edge''': 18}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ) -> Dict:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowerCAmelCase ( ImageProcessingSavingTestMixin , unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp( self ) -> List[Any]:
'''simple docstring'''
        self.image_processor_tester = LevitImageProcessingTester(self )
@property
    def image_processor_dict( self ) -> Optional[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
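# Editor's note: the preprocessing exercised above, outside a test (checkpoint is
# illustrative):
#   from transformers import LevitImageProcessor
#   processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
#   pixel_values = processor(images=pil_image, return_tensors="pt").pixel_values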
| 24 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase_ : List[str] = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
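# Editor's note: what the lazy pattern above buys, as a sketch — importing the
# package stays cheap, and heavy submodules load on first attribute access:
#   import transformers.models.gpt_bigcode as gpt_bigcode   # no torch import yet
#   model_cls = gpt_bigcode.GPTBigCodeModel                 # resolved lazily here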
| 24 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Union[str, Any] = ['''pixel_values''']
def __init__( self , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = 1 / 255 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> None:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
__snake_case = size if size is not None else {'''height''': 224, '''width''': 224}
__snake_case = get_size_dict(__SCREAMING_SNAKE_CASE )
__snake_case = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__snake_case = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE , param_name='''crop_size''' )
__snake_case = do_resize
__snake_case = do_rescale
__snake_case = do_normalize
__snake_case = do_center_crop
__snake_case = crop_size
__snake_case = size
__snake_case = resample
__snake_case = rescale_factor
__snake_case = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__snake_case = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> np.ndarray:
'''simple docstring'''
__snake_case = get_size_dict(__SCREAMING_SNAKE_CASE )
if "shortest_edge" in size:
__snake_case = get_resize_output_image_size(__SCREAMING_SNAKE_CASE , size=size['''shortest_edge'''] , default_to_square=__SCREAMING_SNAKE_CASE )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
__snake_case = (size['''height'''], size['''width'''])
else:
raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> np.ndarray:
'''simple docstring'''
__snake_case = get_size_dict(__SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(__SCREAMING_SNAKE_CASE , size=(size['''height'''], size['''width''']) , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE ) -> np.ndarray:
'''simple docstring'''
return rescale(__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> np.ndarray:
'''simple docstring'''
return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE , ) -> BatchFeature:
'''simple docstring'''
__snake_case = do_resize if do_resize is not None else self.do_resize
__snake_case = do_rescale if do_rescale is not None else self.do_rescale
__snake_case = do_normalize if do_normalize is not None else self.do_normalize
__snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
__snake_case = crop_size if crop_size is not None else self.crop_size
__snake_case = get_size_dict(__SCREAMING_SNAKE_CASE , param_name='''crop_size''' , default_to_square=__SCREAMING_SNAKE_CASE )
__snake_case = resample if resample is not None else self.resample
__snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case = image_mean if image_mean is not None else self.image_mean
__snake_case = image_std if image_std is not None else self.image_std
__snake_case = size if size is not None else self.size
__snake_case = get_size_dict(__SCREAMING_SNAKE_CASE )
if not is_batched(__SCREAMING_SNAKE_CASE ):
__snake_case = [images]
if not valid_images(__SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
__snake_case = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
__snake_case = [self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
__snake_case = [self.center_crop(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
__snake_case = [self.rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
__snake_case = [self.normalize(image=__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE ) for image in images]
__snake_case = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images]
__snake_case = {'''pixel_values''': images}
return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
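# Editor's note: the preprocess pipeline above (resize -> center-crop -> rescale ->
# normalize -> CHW) expressed in plain numpy, as a sketch of the same math:
#   x = np.asarray(pil_image.resize((224, 224))).astype(np.float32)
#   x = x * (1 / 255)                                   # rescale
#   x = (x - IMAGENET_DEFAULT_MEAN) / IMAGENET_DEFAULT_STD
#   x = x.transpose(2, 0, 1)                            # HWC -> CHW (ChannelDimension.FIRST)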
| 24 |
'''simple docstring'''
def sum_of_digits(n : int )-> int:
    '''simple docstring'''
    n = abs(n )
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n : int )-> int:
    '''simple docstring'''
    n = abs(n )
    return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def sum_of_digits_compact(n : int )-> int:
    '''simple docstring'''
    return sum(int(c ) for c in str(abs(n ) ) )
def benchmark()-> None:
    '''simple docstring'''
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func : Callable , value : int ) -> None:
        call = f'''{func.__name__}({value})'''
        timing = timeit(f'''__main__.{call}''' , setup='''import __main__''' )
        print(f'''{call:56} = {func(value )} -- {timing:.4f} seconds''' )
    for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
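# Editor's note: quick checks for the three implementations above:
#   sum_of_digits(12345) == sum_of_digits_recursion(12345) == sum_of_digits_compact(12345) == 15
#   sum_of_digits(-123) == 6   # all variants take abs() first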
| 24 | 1 |
'''simple docstring'''
from functools import reduce
UpperCAmelCase_ : List[str] = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n : str = N )-> int:
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
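# Editor's note: sanity check — with the 1000-digit constant N above and a window
# of 13 adjacent digits, solution() should return 23514624000 (the widely quoted
# Project Euler 8 result).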
| 24 |
'''simple docstring'''
def infix_2_postfix(infix ):
    '''simple docstring'''
    stack = []
    post_fix = []
    priority = {
        '''^''': 3,
        '''*''': 2,
        '''/''': 2,
        '''%''': 2,
        '''+''': 1,
        '''-''': 1,
    }  # Priority of each operator
    print_width = len(infix ) if (len(infix ) > 7) else 7
    # Print table header for output
    print(
        '''Symbol'''.center(8 ) , '''Stack'''.center(print_width ) , '''Postfix'''.center(print_width ) , sep=''' | ''' , )
    print('''-''' * (print_width * 3 + 7) )
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x )  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x )  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() )  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack ) == 0:
                stack.append(x )  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack ) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() )  # pop stack & add to Postfix
                stack.append(x )  # push x to stack
        print(
            x.center(8 ) , (''''''.join(stack )).ljust(print_width ) , (''''''.join(post_fix )).ljust(print_width ) , sep=''' | ''' , )  # Output in tabular format
    while len(stack ) > 0:  # while stack is not empty
        post_fix.append(stack.pop() )  # pop stack & add to Postfix
        print(
            ''' '''.center(8 ) , (''''''.join(stack )).ljust(print_width ) , (''''''.join(post_fix )).ljust(print_width ) , sep=''' | ''' , )  # Output in tabular format
    return "".join(post_fix )  # return Postfix as str
def infix_2_prefix(infix ):
    '''simple docstring'''
    infix = list(infix[::-1] )  # reverse the infix equation
    for i in range(len(infix ) ):
        if infix[i] == "(":
            infix[i] = ''')'''  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = '''('''  # change ")" to "("
    return (infix_2_postfix(''''''.join(infix ) ))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input('''\nEnter an Infix Equation = ''')  # Input an Infix equation
    Infix = ''''''.join(Infix.split())  # Remove spaces from the input
    print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
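# Editor's note: example run of the converters above:
#   infix_2_postfix("a+b*c") returns "abc*+" (and prints a trace table)
#   infix_2_prefix("a+b*c")  returns "+a*bc"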
| 24 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowerCAmelCase ( unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ) -> Optional[Any]:
        '''simple docstring'''
        generator = TextaTextGenerationPipeline(model=model , tokenizer=tokenizer )
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test( self , generator , _ ) -> List[str]:
        '''simple docstring'''
        outputs = generator('''Something there''' )
        self.assertEqual(outputs , [{'''generated_text''': ANY(str )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
        outputs = generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
                [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
            ] , )
        outputs = generator(
            ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
                [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
            ] , )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
generator(4 )
@require_torch
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''pt''' )
# do_sample=False necessary for reproducibility
__snake_case = generator('''Something there''' , do_sample=__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , [{'''generated_text''': ''''''}] )
__snake_case = 3
__snake_case = generator(
'''Something there''' , num_return_sequences=__SCREAMING_SNAKE_CASE , num_beams=__SCREAMING_SNAKE_CASE , )
__snake_case = [
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': ''''''},
]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = generator('''This is a test''' , do_sample=__SCREAMING_SNAKE_CASE , num_return_sequences=2 , return_tensors=__SCREAMING_SNAKE_CASE )
self.assertEqual(
__SCREAMING_SNAKE_CASE , [
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
] , )
__snake_case = generator.model.config.eos_token_id
__snake_case = '''<pad>'''
__snake_case = generator(
['''This is a test''', '''This is a second test'''] , do_sample=__SCREAMING_SNAKE_CASE , num_return_sequences=2 , batch_size=2 , return_tensors=__SCREAMING_SNAKE_CASE , )
self.assertEqual(
__SCREAMING_SNAKE_CASE , [
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
] , )
@require_tf
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''tf''' )
# do_sample=False necessary for reproducibility
__snake_case = generator('''Something there''' , do_sample=__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , [{'''generated_text''': ''''''}] )
| 24 |
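For reference, a minimal standalone use of the pipeline exercised by the tests above — a sketch mirroring the checkpoint and flags from the test; the tiny random model produces deterministic but meaningless text:

from transformers import pipeline

generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
print(generator("Something there", do_sample=False))        # [{'generated_text': ''}], as asserted above
print(generator("Something there", num_return_sequences=2, num_beams=3))   # beam-search variants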
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase):
__lowercase : List[Any] = '''swin'''
__lowercase : str = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=224 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=96 , __SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE=[3, 6, 12, 24] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=4.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> int:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = embed_dim
__snake_case = depths
__snake_case = len(__SCREAMING_SNAKE_CASE )
__snake_case = num_heads
__snake_case = window_size
__snake_case = mlp_ratio
__snake_case = qkv_bias
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = drop_path_rate
__snake_case = hidden_act
__snake_case = use_absolute_embeddings
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
__snake_case = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
__snake_case , __snake_case = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Optional[int] = version.parse('''1.11''')
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase ( self ) -> float:
'''simple docstring'''
return 1E-4
| 24 | 1 |
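A sanity check on the derived hidden_size and stage_names above — a sketch assuming a standard transformers install with the stock Swin defaults:

from transformers import SwinConfig

config = SwinConfig()    # defaults: embed_dim=96, depths=[2, 2, 6, 2]
assert config.hidden_size == 96 * 2 ** (len(config.depths) - 1)    # 768, the channel dim after the last stage
assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]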
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'''post_extract_proj''': '''feature_projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.upsample.0''': '''encoder.upsample.projection''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def _UpperCamelCase (_lowerCamelCase : List[str] , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] )-> str:
'''simple docstring'''
for attribute in key.split('''.''' ):
__snake_case = getattr(_lowerCamelCase , _lowerCamelCase )
if weight_type is not None:
__snake_case = getattr(_lowerCamelCase , _lowerCamelCase ).shape
else:
__snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
__snake_case = value
elif weight_type == "weight_g":
__snake_case = value
elif weight_type == "weight_v":
__snake_case = value
elif weight_type == "bias":
__snake_case = value
else:
__snake_case = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : Optional[int] )-> List[str]:
'''simple docstring'''
__snake_case = []
__snake_case = fairseq_model.state_dict()
__snake_case = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__snake_case = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == '''group''' , )
__snake_case = True
else:
for key, mapped_key in MAPPING.items():
__snake_case = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__snake_case = True
if "*" in mapped_key:
__snake_case = name.split(_lowerCamelCase )[0].split('''.''' )[-2]
__snake_case = mapped_key.replace('''*''' , _lowerCamelCase )
if "weight_g" in name:
__snake_case = '''weight_g'''
elif "weight_v" in name:
__snake_case = '''weight_v'''
elif "weight" in name:
__snake_case = '''weight'''
elif "bias" in name:
__snake_case = '''bias'''
else:
__snake_case = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[int] )-> Optional[Any]:
'''simple docstring'''
__snake_case = full_name.split('''conv_layers.''' )[-1]
__snake_case = name.split('''.''' )
__snake_case = int(items[0] )
__snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
__snake_case = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
__snake_case = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
__snake_case = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
)
__snake_case = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowerCamelCase )
def _UpperCamelCase (_lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] )-> Union[str, Any]:
'''simple docstring'''
__snake_case = SEWConfig()
if is_finetuned:
__snake_case = model.wav_encoder.wav_model.cfg
else:
__snake_case = model.cfg
__snake_case = fs_config.conv_bias
__snake_case = eval(fs_config.conv_feature_layers )
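    # fairseq stores conv_feature_layers as a string such as "[(512, 10, 5), (512, 3, 2), ...]"
    # (each tuple is (dim, kernel, stride)), hence the eval above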
__snake_case = [x[0] for x in conv_layers]
__snake_case = [x[1] for x in conv_layers]
__snake_case = [x[2] for x in conv_layers]
__snake_case = '''gelu'''
__snake_case = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
__snake_case = 0.0
__snake_case = fs_config.activation_fn.name
__snake_case = fs_config.encoder_embed_dim
__snake_case = 0.02
__snake_case = fs_config.encoder_ffn_embed_dim
__snake_case = 1E-5
__snake_case = fs_config.encoder_layerdrop
__snake_case = fs_config.encoder_attention_heads
__snake_case = fs_config.conv_pos_groups
__snake_case = fs_config.conv_pos
__snake_case = len(_lowerCamelCase )
__snake_case = fs_config.encoder_layers
__snake_case = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
__snake_case = model.cfg
__snake_case = fs_config.final_dropout
__snake_case = fs_config.layerdrop
__snake_case = fs_config.activation_dropout
__snake_case = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
__snake_case = fs_config.attention_dropout
__snake_case = fs_config.dropout_input
__snake_case = fs_config.dropout
__snake_case = fs_config.mask_channel_length
__snake_case = fs_config.mask_channel_prob
__snake_case = fs_config.mask_length
__snake_case = fs_config.mask_prob
__snake_case = '''Wav2Vec2FeatureExtractor'''
__snake_case = '''Wav2Vec2CTCTokenizer'''
return config
@torch.no_grad()
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : Dict , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Optional[Any]=True )-> int:
'''simple docstring'''
if is_finetuned:
__snake_case , __snake_case , __snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__snake_case , __snake_case , __snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
__snake_case = SEWConfig.from_pretrained(_lowerCamelCase )
else:
__snake_case = convert_config(model[0] , _lowerCamelCase )
__snake_case = model[0].eval()
__snake_case = True if config.feat_extract_norm == '''layer''' else False
__snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
if is_finetuned:
if dict_path:
__snake_case = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__snake_case = target_dict.pad_index
__snake_case = target_dict.bos_index
__snake_case = target_dict.pad_index
__snake_case = target_dict.bos_index
__snake_case = target_dict.eos_index
__snake_case = len(target_dict.symbols )
__snake_case = os.path.join(_lowerCamelCase , '''vocab.json''' )
if not os.path.isdir(_lowerCamelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , _lowerCamelCase )
__snake_case = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_lowerCamelCase , )
__snake_case = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
__snake_case = SEWForCTC(_lowerCamelCase )
else:
__snake_case = SEWModel(_lowerCamelCase )
feature_extractor.save_pretrained(_lowerCamelCase )
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--is_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
UpperCAmelCase_ : str = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 24 |
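Mirroring the __main__ wiring above, a hypothetical direct call to the converter — all paths are placeholders, and the arguments are positional because the parameter names in this dump are mangled:

convert_sew_checkpoint(
    "/path/to/sew_base.pt",     # fairseq checkpoint
    "/path/to/hf_dump",         # output folder for the converted model
    None,                       # config_path: None -> derive the config via convert_config
    None,                       # dict_path: only needed for fine-tuned (CTC) checkpoints
    False,                      # is_finetuned: base model -> exports SEWModel, no tokenizer
)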
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def _UpperCamelCase (_lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
__snake_case = int(_lowerCamelCase )
__snake_case , __snake_case , __snake_case = t // 36_00, (t // 60) % 60, t % 60
return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : List[Any]=3_00 )-> int:
'''simple docstring'''
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def _UpperCamelCase (_lowerCamelCase : int )-> List[Any]:
'''simple docstring'''
__snake_case = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
__snake_case = f'''{elt:.6f}''' if isinstance(_lowerCamelCase , _lowerCamelCase ) else str(_lowerCamelCase )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class lowerCAmelCase :
__lowercase : str = 5
__lowercase : Optional[Any] = 0.2
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 300 , ) -> List[Any]:
'''simple docstring'''
__snake_case = total
__snake_case = '''''' if prefix is None else prefix
__snake_case = leave
__snake_case = parent
__snake_case = width
__snake_case = None
__snake_case = None
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None ) -> Any:
'''simple docstring'''
__snake_case = value
if comment is not None:
__snake_case = comment
if self.last_value is None:
__snake_case = __snake_case = time.time()
__snake_case = __snake_case = value
__snake_case = __snake_case = None
__snake_case = self.warmup
__snake_case = 1
self.update_bar(__SCREAMING_SNAKE_CASE )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__snake_case = time.time()
__snake_case = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__snake_case = self.elapsed_time / (value - self.start_value)
else:
__snake_case = None
if value >= self.total:
__snake_case = self.total
__snake_case = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__snake_case = self.average_time_per_item * (self.total - value)
self.update_bar(__SCREAMING_SNAKE_CASE )
__snake_case = value
__snake_case = current_time
if self.average_time_per_item is None:
__snake_case = 1
else:
__snake_case = max(int(self.update_every / self.average_time_per_item ) , 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[str]:
'''simple docstring'''
__snake_case = ''' ''' * (len(str(self.total ) ) - len(str(__SCREAMING_SNAKE_CASE ) )) + str(__SCREAMING_SNAKE_CASE )
if self.elapsed_time is None:
__snake_case = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
__snake_case = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
__snake_case = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> Any:
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__snake_case = None if column_names is None else [column_names]
__snake_case = None
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
if self.inner_table is None:
__snake_case = [list(values.keys() ), list(values.values() )]
else:
__snake_case = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__SCREAMING_SNAKE_CASE )
__snake_case = columns
self.inner_table.append([values[c] for c in columns] )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=300 ) -> List[str]:
'''simple docstring'''
__snake_case = NotebookProgressBar(__SCREAMING_SNAKE_CASE , prefix=__SCREAMING_SNAKE_CASE , parent=self , width=__SCREAMING_SNAKE_CASE )
return self.child_bar
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = None
self.display()
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self ) -> str:
'''simple docstring'''
__snake_case = None
__snake_case = None
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
__snake_case = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
__snake_case = 0
__snake_case = 0
__snake_case = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
__snake_case = NotebookTrainingTracker(state.max_steps , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
__snake_case = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if not has_length(__SCREAMING_SNAKE_CASE ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__snake_case = self.training_tracker.add_child(len(__SCREAMING_SNAKE_CASE ) )
else:
__snake_case = NotebookProgressBar(len(__SCREAMING_SNAKE_CASE ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__snake_case = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
__snake_case = state.global_step
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
if self.training_tracker is not None:
__snake_case = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
__snake_case = log['''loss''']
break
if self.first_column == "Epoch":
__snake_case = int(state.epoch )
else:
__snake_case = state.global_step
__snake_case = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
__snake_case = re.sub(r'''\_loss$''' , '''''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''total_flos''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''epoch''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_runtime''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , __SCREAMING_SNAKE_CASE )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
__snake_case = v
else:
__snake_case = k.split('''_''' )
__snake_case = ''' '''.join([part.capitalize() for part in splits[1:]] )
__snake_case = v
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
self.training_tracker.remove_child()
__snake_case = None
# Evaluation takes a long time so we should force the next update.
__snake_case = True
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=__SCREAMING_SNAKE_CASE )
__snake_case = None
| 24 | 1 |
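The two module-level helpers above are easy to exercise in isolation — a sketch assuming they keep their original names format_time and html_progress_bar, as the class bodies above call them; arguments are positional:

assert format_time(75) == "01:15"         # under an hour: mm:ss
assert format_time(3661) == "1:01:01"     # h:mm:ss once hours are non-zero
html = html_progress_bar(30, 100, "Epoch 1", "30/100")   # (value, total, prefix, label, width=300)
assert "<progress" in html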
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Tuple = FunnelTokenizer
__lowercase : int = FunnelTokenizerFast
__lowercase : Optional[int] = True
__lowercase : Union[str, Any] = True
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
__snake_case = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__snake_case = '''UNwant\u00E9d,running'''
__snake_case = '''unwanted, running'''
return input_text, output_text
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = self.tokenizer_class(self.vocab_file )
__snake_case = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [7, 4, 5, 10, 8, 9] )
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = self.get_tokenizers(do_lower_case=__SCREAMING_SNAKE_CASE )
for tokenizer in tokenizers:
__snake_case = tokenizer('''UNwant\u00E9d,running''' )
__snake_case = len(inputs['''input_ids'''] ) - 1
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
__snake_case = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
| 24 |
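The token_type_ids convention asserted above ([2] for <cls>, then 0s and 1s per segment) also holds for the released checkpoints — a sketch assuming network access to the Hub:

from transformers import FunnelTokenizer

tok = FunnelTokenizer.from_pretrained("funnel-transformer/small")
enc = tok("first segment", "second segment")
assert enc["token_type_ids"][0] == 2                 # <cls> gets its own token type
assert set(enc["token_type_ids"][1:]) == {0, 1}      # segment A -> 0, segment B -> 1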
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
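    # R(k) = (10**k - 1) // 9 is odd and never divisible by 5, so only divisors
    # coprime to 10 can ever divide a repunit; 0 is returned as a sentinel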
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
__snake_case = 1
__snake_case = 1
while repunit:
__snake_case = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def _UpperCamelCase (_lowerCamelCase : int = 1_00_00_00 )-> int:
'''simple docstring'''
__snake_case = limit - 1
if divisor % 2 == 0:
divisor += 1
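    # by pigeonhole A(n) <= n, so the first n with A(n) > limit lies just above limit;
    # even candidates (and multiples of 5, via the sentinel 0) never qualify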
while least_divisible_repunit(_lowerCamelCase ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 24 | 1 |
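Spot checks for the helpers above, matching the Project Euler 129 statement (R(6) = 111111 = 7 × 15873, so A(7) = 6, and A(41) = 5):

assert least_divisible_repunit(7) == 6
assert least_divisible_repunit(41) == 5
assert least_divisible_repunit(10) == 0    # shares a factor with 10 -> sentinel 0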