"""Fine-tune a GLUE/MRPC sequence classifier with Accelerate (optionally under
DeepSpeed) and record the best evaluation accuracy across epochs."""
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: a real AdamW unless DeepSpeed's own config already defines
    # an optimizer, in which case a DummyOptim placeholder defers construction to DeepSpeed.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler, with the same real-vs-dummy logic as the optimizer
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of a training script tracking model performance.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
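# How this script is typically launched (a sketch, not part of the original file;
# the script and config file names below are placeholders for whatever
# `accelerate config` produced on your machine):
#
#   accelerate launch --config_file deepspeed_config.yaml train_mrpc.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 --output_dir .
#
# When the launcher activates a DeepSpeed plugin whose JSON config defines its own
# `optimizer`/`scheduler` sections, the DummyOptim/DummyScheduler branches above
# hand construction over to DeepSpeed instead of building AdamW and the
# linear-warmup scheduler in Python.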
"""Convert an original mLUKE checkpoint into the Hugging Face LUKE/MLuke format."""
import argparse
import json
import os
from collections import OrderedDict

import torch

from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face["luke." + key] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
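# Example invocation (a sketch; the file paths are placeholders for the artifacts
# of an original studio-ousia mLUKE training run):
#
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path mluke_base/pytorch_model.bin \
#       --metadata_path mluke_base/metadata.json \
#       --entity_vocab_path mluke_base/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./converted_mluke_base \
#       --model_size base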
import unittest

from knapsack import greedy_knapsack as kp


class TestClass(unittest.TestCase):
    """Test cases for the greedy (fractional) knapsack implementation."""

    def test_sorted(self):
        """calc_profit(profit, weight, max_weight) returns the expected total profit."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        """A negative max_weight raises ValueError."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        with self.assertRaisesRegex(ValueError, "max_weight must greater than zero."):
            kp.calc_profit(profit, weight, -15)

    def test_negative_weight_value(self):
        """A negative weight value raises ValueError."""
        profit = [10, 20, 30]
        weight = [2, -4, 6]
        with self.assertRaisesRegex(ValueError, "Weight can not be negative."):
            kp.calc_profit(profit, weight, 100)

    def test_negative_profit_value(self):
        """A negative profit value raises ValueError."""
        profit = [10, -20, 30]
        weight = [2, 4, 6]
        with self.assertRaisesRegex(ValueError, "Profit can not be negative."):
            kp.calc_profit(profit, weight, 100)

    def test_null_max_weight(self):
        """A zero max_weight raises ValueError."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        with self.assertRaisesRegex(ValueError, "max_weight must greater than zero."):
            kp.calc_profit(profit, weight, 0)

    def test_unequal_list_length(self):
        """Profit and weight lists of different lengths raise ValueError."""
        profit = [10, 20, 30, 40]
        weight = [2, 4, 6, 8, 10, 12]
        with self.assertRaisesRegex(ValueError, "The length of profit and weight must be same."):
            kp.calc_profit(profit, weight, 100)


if __name__ == "__main__":
    unittest.main()
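# For context, a minimal sketch of the fractional-knapsack `calc_profit` these
# tests target; illustrative only -- the real implementation lives in
# knapsack/greedy_knapsack.py, and the error messages here are taken from the
# regexes asserted above.
def calc_profit_sketch(profit: list, weight: list, max_weight: int) -> float:
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # Greedy choice: take items by profit density, allowing a fractional
    # final item once the remaining capacity cannot fit a whole one.
    by_ratio = sorted(range(len(profit)), key=lambda i: profit[i] / weight[i], reverse=True)
    used, gain = 0, 0.0
    for i in by_ratio:
        if used + weight[i] <= max_weight:
            used += weight[i]
            gain += profit[i]
        else:
            gain += (max_weight - used) / weight[i] * profit[i]
            break
    return gain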
def abbr(a: str, b: str) -> bool:
    """
    Determine whether string `a` can be turned into string `b` by capitalizing
    zero or more of a's lowercase letters and deleting all remaining lowercase
    letters (https://www.hackerrank.com/challenges/abbr/problem).

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
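# Worked example of the table semantics (dp[i][j] is True when some mix of
# "capitalize" and "delete lowercase" operations turns a[:i] into b[:j]):
#   abbr("daBcd", "ABC") -> True: delete 'd', capitalize 'a', match 'B',
#   capitalize 'c', delete the trailing 'd'.
#   abbr("dBcd", "ABC") -> False: the uppercase 'B' can neither be deleted nor
#   matched against the expected 'A', so no path reaches dp[n][m].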
if __name__ == "__main__":
import doctest
doctest.testmod()
"""Test that Prim's algorithm recovers the expected minimum spanning tree edges."""
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
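# For reference, a minimal sketch of Prim's algorithm over the adjacency format
# built above ({node: [[neighbor, cost], ...]}); illustrative only, not the
# `graphs.minimum_spanning_tree_prims` module the test imports.
import heapq


def prim_mst_sketch(adjacency):
    start = next(iter(adjacency))
    visited = {start}
    # heap entries are (cost, u, v) for edges u-v leaving the visited set
    heap = [(cost, start, neighbor) for neighbor, cost in adjacency[start]]
    heapq.heapify(heap)
    result = []
    while heap and len(visited) < len(adjacency):
        cost, u, v = heapq.heappop(heap)
        if v in visited:
            continue
        visited.add(v)
        result.append((u, v))
        for neighbor, edge_cost in adjacency[v]:
            if neighbor not in visited:
                heapq.heappush(heap, (edge_cost, v, neighbor))
    return result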
import math
import os
import unittest

from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        MegatronBertForCausalLM,
        MegatronBertForMaskedLM,
        MegatronBertForMultipleChoice,
        MegatronBertForNextSentencePrediction,
        MegatronBertForPreTraining,
        MegatronBertForQuestionAnswering,
        MegatronBertForSequenceClassification,
        MegatronBertForTokenClassification,
        MegatronBertModel,
    )


class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
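# Note on the structure above: MegatronBertModelTester builds a deliberately tiny
# random config (5 layers, hidden size 64) so every task head can be exercised on
# CPU in seconds, while the integration test -- skipped because the 345M checkpoint
# is not publicly hosted -- compares a 3x3 corner of real fp16 activations using
# both rel_tol and abs_tol, since neither tolerance alone suits values near zero.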
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )


@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
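# Usage sketch (not part of the test file; the checkpoint name is one of the
# public InstructBLIP releases): the processor fans a single text prompt out to
# both text encoders, which is why the keys asserted above come in pairs.
#
#   from transformers import InstructBlipProcessor
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
#   # -> pixel_values, input_ids / attention_mask (language model),
#   #    qformer_input_ids / qformer_attention_mask (Q-Former)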
import inspect
import os
import sys
import unittest

import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu


class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
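# The test shells out rather than spawning in-process: xla_spawn.py is a small
# launcher (it wraps torch_xla's multiprocessing spawn, to the best of my
# understanding) that forks one worker per TPU core, so the subprocess call is
# equivalent to running:
#
#   python xla_spawn.py --num_cores 8 test_script.py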
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
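# Usage note (a sketch of the lazy-import behavior this file sets up): replacing
# the module in sys.modules with a _LazyModule means heavy backends are only
# imported on first attribute access, e.g.
#
#   >>> from transformers.models import encoder_decoder   # cheap: nothing loaded yet
#   >>> encoder_decoder.EncoderDecoderModel               # now the torch-backed module is imported
#
# so importing the package stays fast even when the tf/flax extras are absent.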
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)

        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
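# The (20, 64, 64, 3) output checked above is a stack of 20 rendered RGB views of
# the generated 3D asset (presumably a turntable of camera angles), and
# assert_mean_pixel_difference compares it against the reference .npy render with
# a mean-pixel tolerance rather than an exact match.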
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = ["""image_processor""", """tokenizer"""]
snake_case_ = """LayoutLMv3ImageProcessor"""
snake_case_ = ("""LayoutLMv3Tokenizer""", """LayoutLMv3TokenizerFast""")
def __init__( self : int , __lowercase : Optional[Any]=None , __lowercase : Any=None , **__lowercase : Tuple ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[Any] =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __lowercase , )
SCREAMING_SNAKE_CASE__ : Dict =kwargs.pop('''feature_extractor''' )
SCREAMING_SNAKE_CASE__ : Tuple =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__lowercase , __lowercase )
    def __call__( self , images , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes : Union[List[List[int]], List[List[List[int]]]] = None , word_labels : Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                '''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['''words''']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop('''pixel_values''' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs['''overflow_to_sample_mapping'''] )
        encoded_inputs['''pixel_values'''] = images
        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ) -> List[Any]:
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
                F" {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}" )
        return images_with_overflow
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
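# A minimal usage sketch for the processor above. The checkpoint name and the
# input image are illustrative assumptions; with apply_ocr=True (the image
# processor's default) words and boxes come from OCR, so none are passed here.
#
# from transformers import LayoutLMv3Processor
# from PIL import Image
#
# processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
# encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
# print(encoding.keys())  # input_ids, attention_mask, bbox, pixel_values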
| 665 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
snake_case_ = """gpt_bigcode"""
snake_case_ = ["""past_key_values"""]
snake_case_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self , vocab_size=5_02_57 , n_positions=10_24 , n_embd=7_68 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=5_02_56 , eos_token_id=5_02_56 , attention_softmax_in_fpaa=True , scale_attention_softmax_in_fpaa=True , multi_query=True , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fpaa = attention_softmax_in_fpaa
        self.scale_attention_softmax_in_fpaa = scale_attention_softmax_in_fpaa
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
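# A quick usage sketch for the config above (values are the __init__ defaults,
# not read from any published checkpoint; upstream this class is GPTBigCodeConfig):
#
# config = __SCREAMING_SNAKE_CASE(n_embd=7_68, n_layer=12, n_head=12)
# assert config.hidden_size == config.n_embd  # resolved through attribute_map
# assert config.model_type == "gpt_bigcode"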
| 665 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate( model_type : str, generator_name_or_path : str, question_encoder_name_or_path : str, dest_dir : Path, config_name_or_path : str = None, generator_tokenizer_name_or_path : str = None, question_encoder_tokenizer_name_or_path : str = None, ):
    '''simple docstring'''
    if config_name_or_path is None:
        config_name_or_path = '''facebook/rag-token-base''' if model_type == '''rag_token''' else '''facebook/rag-sequence-base'''
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == '''rag_token''' else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path )
    gen_config = AutoConfig.from_pretrained(generator_name_or_path )
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path )
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config )
    rag_model.save_pretrained(dest_dir )
    # Sanity check.
    model_class.from_pretrained(dest_dir )
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path )
    gen_tokenizer.save_pretrained(dest_dir / '''generator_tokenizer/''' )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path )
    question_encoder_tokenizer.save_pretrained(dest_dir / '''question_encoder_tokenizer/''' )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
a_ = parser.parse_args()
a_ = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
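# Example invocation (the script filename, paths and model identifiers below are
# placeholders, not taken from the repository):
#
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-checkpoint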
| 665 |
'''simple docstring'''
class __SCREAMING_SNAKE_CASE :
    def __init__( self , size : int ) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size
    @staticmethod
    def get_next( index : int ) -> int:
        return index | (index + 1)
    @staticmethod
    def get_prev( index : int ) -> int:
        return (index & (index + 1)) - 1
    def update( self , index : int , value : int ) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index ) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value , self.query(current_left_border , index ) )
            index = self.get_next(index )
    def query( self , left : int , right : int ) -> int:
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right )
            if left <= current_left:
                result = max(result , self.tree[right] )
                right = current_left
            else:
                result = max(result , self.arr[right] )
                right -= 1
        return result
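# A short usage sketch (added for illustration, not part of the upstream
# implementation): point updates combined with max-queries over the half-open
# range [left, right). Call _demo_max_fenwick() to exercise it.
def _demo_max_fenwick() -> None:
    tree = __SCREAMING_SNAKE_CASE(5)
    tree.update(0, 3)
    tree.update(3, 7)
    assert tree.query(0, 4) == 7  # max over indices 0..3
    assert tree.query(1, 3) == 0  # indices 1..2 were never set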
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
END_COMMON = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def rename_state_dict_key( k , patterns ):
    '''simple docstring'''
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name , hf_name )
    return k
def convert_bigbird_pegasus( tf_weights : dict, config_update : dict ):
    '''simple docstring'''
    cfg = BigBirdPegasusConfig(**config_update )
    torch_model = BigBirdPegasusForConditionalGeneration(cfg )
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
    for k, v in tqdm(decoder_weights.items(), '''tf -> hf conversion''' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns )
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" )
        if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    for k, v in tqdm(remaining_weights.items(), '''tf -> hf conversion''' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns )
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" )
        if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    mapping['''model.encoder.embed_positions.weight'''] = mapping['''model.embed_positions.weight''']
    mapping['''model.decoder.embed_positions.weight'''] = mapping.pop('''model.embed_positions.weight''' )
    missing, extra = torch_model.load_state_dict(mapping, strict=False )
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            '''final_logits_bias''',
            '''model.encoder.embed_tokens.weight''',
            '''model.decoder.embed_tokens.weight''',
            '''lm_head.weight''',
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy( path ):
    '''simple docstring'''
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['''global_step''']
    for name, shape in tqdm(init_vars, desc='''converting tf checkpoint to dict''' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path, name )
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch( ckpt_path : str, save_dir : str, config_update : dict ):
    '''simple docstring'''
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights, config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
a_ = parser.parse_args()
a_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
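# Illustration of the key renaming above (the sample TF key is an assumed
# example, not read from a real checkpoint):
#
#   rename_state_dict_key("pegasus/decoder/layer_0/attention/self/query/kernel", DECODER_PATTERNS)
#   == "model.decoder.layers.0.self_attn.q_proj.weight"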
| 665 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __SCREAMING_SNAKE_CASE ( DiffusionPipeline ):
snake_case_ = ["""vqvae"""]
    def __init__( self , vqvae : AutoencoderKL , unet : UNetaDConditionModel , mel : Mel , scheduler : Union[DDIMScheduler, DDPMScheduler] , ) -> None:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler , mel=mel , vqvae=vqvae )
    def get_default_steps( self ) -> int:
        return 50 if isinstance(self.scheduler , DDIMScheduler ) else 10_00
@torch.no_grad()
    def __call__( self , batch_size : int = 1 , audio_file : str = None , raw_audio : np.ndarray = None , slice : int = 0 , start_step : int = 0 , steps : int = None , generator : torch.Generator = None , mask_start_secs : float = 0 , mask_end_secs : float = 0 , step_generator : torch.Generator = None , eta : float = 0 , noise : torch.Tensor = None , encoding : torch.Tensor = None , return_dict=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps )
        step_generator = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=generator , device=self.device , )
        images = noise
        mask = None
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file , raw_audio )
            input_image = self.mel.audio_slice_to_image(slice )
            input_image = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
                (input_image.height, input_image.width) )
            input_image = (input_image / 2_55) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images , 0 ) ).latent_dist.sample(
                    generator=generator )[0]
                input_images = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images , noise , self.scheduler.timesteps[start_step - 1] )
            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second )
            mask_end = int(mask_end_secs * pixels_per_second )
            mask = self.scheduler.add_noise(input_images , noise , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet , UNetaDConditionModel ):
                model_output = self.unet(images , t , encoding )['''sample''']
            else:
                model_output = self.unet(images , t )['''sample''']
            if isinstance(self.scheduler , DDIMScheduler ):
                images = self.scheduler.step(
                    model_output=model_output , timestep=t , sample=images , eta=eta , generator=step_generator , )['''prev_sample''']
            else:
                images = self.scheduler.step(
                    model_output=model_output , timestep=t , sample=images , generator=step_generator , )['''prev_sample''']
            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images )['''sample''']
        images = (images / 2 + 0.5).clamp(0 , 1 )
        images = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        images = (images * 2_55).round().astype('''uint8''' )
        images = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_ , mode='''RGB''' ).convert('''L''' ) for _ in images) )
        audios = [self.mel.image_to_audio(_ ) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(audios )[:, np.newaxis, :] ) , **ImagePipelineOutput(images ) )
@torch.no_grad()
    def encode( self , images : List[Image.Image] , steps : int = 50 ) -> np.ndarray:
        assert isinstance(self.scheduler , DDIMScheduler )
        self.scheduler.set_timesteps(steps )
        sample = np.array(
            [np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
        sample = (sample / 2_55) * 2 - 1
        sample = torch.Tensor(sample ).to(self.device )
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample , t )['''sample''']
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
@staticmethod
    def slerp( x0 : torch.Tensor , x1 : torch.Tensor , alpha : float ) -> torch.Tensor:
        theta = acos(torch.dot(torch.flatten(x0 ) , torch.flatten(x1 ) ) / torch.norm(x0 ) / torch.norm(x1 ) )
        return sin((1 - alpha) * theta ) * x0 / sin(theta ) + sin(alpha * theta ) * x1 / sin(theta )
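# A quick sanity check for the spherical interpolation above (arbitrary test
# tensors; this demo is an addition for illustration): alpha = 0 and alpha = 1
# recover the endpoints up to floating point error.
def _slerp_demo() -> None:
    x0 = torch.randn(4)
    x1 = torch.randn(4)
    assert torch.allclose(__SCREAMING_SNAKE_CASE.slerp(x0, x1, 0.0), x0, atol=1e-5)
    assert torch.allclose(__SCREAMING_SNAKE_CASE.slerp(x0, x1, 1.0), x1, atol=1e-5)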
| 665 | 1 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request('''GET''', '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''', '''https://huggingface.co''', timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''', '''https://huggingface.co''' )
def test_offline_with_datasets_offline_mode_enabled():
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head('''https://huggingface.co''' )
| 700 |
'''simple docstring'''
from math import isqrt
def calculate_prime_numbers( max_number : int ):
    '''simple docstring'''
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2, max_number, i ):
                is_prime[j] = False
    return [i for i in range(2, max_number ) if is_prime[i]]
def solution( max_number : int = 1_0**8 ):
    '''simple docstring'''
    prime_numbers = calculate_prime_numbers(max_number // 2 )
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
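# Worked example (computed by hand; this check is an addition for illustration):
# the semiprimes below 30 are 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26 — ten values —
# and solution(30) counts exactly those pairs p <= q of primes with p * q < 30.
def _check_small_bound() -> None:
    assert solution(30 ) == 10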
if __name__ == "__main__":
print(F'''{solution() = }''')
| 665 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( BaseImageProcessor ):
snake_case_ = ["""pixel_values"""]
    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BICUBIC , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 2_55 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 3_84, '''width''': 3_84}
        size = get_size_dict(size , default_to_square=True )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=True )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}" )
        output_size = (size['''height'''], size['''width'''])
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images : ImageInput , do_resize : Optional[bool] = None , size : Optional[Dict[str, int]] = None , resample : PILImageResampling = None , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_normalize : Optional[bool] = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , do_convert_rgb : bool = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=True )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        encoded_outputs = BatchFeature(data={'''pixel_values''': images} , tensor_type=return_tensors )
        return encoded_outputs
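# A minimal usage sketch for the image processor above (the image path is an
# illustrative assumption; upstream this class is the BLIP image processor):
#
# from PIL import Image
#
# processor = __SCREAMING_SNAKE_CASE()
# inputs = processor(images=Image.open("photo.jpg"), return_tensors="pt")
# print(inputs["pixel_values"].shape)  # (1, 3, 384, 384) with the defaults above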
| 701 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def __magic_name__ ( self : int ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB )
        mask_token = AddedToken('''<mask>''' , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'''mask_token''': mask_token} )
        tokenizer.add_tokens(['''<ctc_blank>'''] )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self , tokenizer ):
        input_text = '''this is a test'''
        output_text = '''this is a test'''
        return input_text, output_text
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ):
        input_text, output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
def __magic_name__ ( self : Dict ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[int] ='''<pad>'''
SCREAMING_SNAKE_CASE__ : Optional[int] =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def __magic_name__ ( self : Tuple ) -> List[str]:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-4] , '''œ''' )
self.assertEqual(vocab_keys[-2] , '''<mask>''' )
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
        self.assertEqual(len(vocab_keys ) , 81 )
def __magic_name__ ( self : Dict ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def __magic_name__ ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE__ : str =self.get_tokenizers(do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Any =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
SCREAMING_SNAKE_CASE__ : int =['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.add_tokens(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , len(__lowercase ) )
self.assertEqual(__lowercase , all_size + len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__lowercase )
self.assertGreaterEqual(len(__lowercase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
SCREAMING_SNAKE_CASE__ : str ={'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
SCREAMING_SNAKE_CASE__ : int =tokenizer.add_special_tokens(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : int =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , len(__lowercase ) )
self.assertEqual(__lowercase , all_size_a + len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__lowercase )
self.assertGreaterEqual(len(__lowercase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def __magic_name__ ( self : Optional[Any] ) -> Any:
pass
def __magic_name__ ( self : List[str] ) -> List[Any]:
pass
def __magic_name__ ( self : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Dict =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.tokenize('''This is a test''' )
# fmt: off
self.assertListEqual(__lowercase , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(__lowercase )
# fmt: off
self.assertListEqual(__lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.convert_ids_to_tokens(__lowercase )
self.assertListEqual(
__lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
def __magic_name__ ( self : List[str] ) -> List[str]:
# Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
        expected_encoding = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=sequences , )
| 665 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester :
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = ConvNextVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        model = ConvNextVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
    def prepare_config_and_inputs_with_labels( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values, '''labels''': labels}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = ConvNextVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextVaConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
    def test_model_common_attributes( self ):
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
    def test_feed_forward_chunking( self ):
pass
    def test_training( self ):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
            ]:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def __magic_name__ ( self : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
@slow
def __magic_name__ ( self : Tuple ) -> str:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =ConvNextVaModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def _a( ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
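# Note: 000000039769.png is the standard COCO fixture (two cats on a couch) used
# throughout the transformers test suite for image-model integration tests.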
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]:
return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
@slow
def __magic_name__ ( self : Tuple ) -> int:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ : List[str] =self.default_image_processor
SCREAMING_SNAKE_CASE__ : Any =prepare_img()
SCREAMING_SNAKE_CASE__ : int =preprocessor(images=UpperCAmelCase_ , return_tensors='''pt''' ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple =model(**UpperCAmelCase_ )
# verify the logits
SCREAMING_SNAKE_CASE__ : List[Any] =torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.tensor([0.9996, 0.1966, -0.4386] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4 ) )
| 702 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , __lowercase : Optional[int] , __lowercase : str=13 , __lowercase : int=10 , __lowercase : List[Any]=3 , __lowercase : List[str]=2 , __lowercase : int=2 , __lowercase : Dict=True , __lowercase : Optional[Any]=True , __lowercase : int=32 , __lowercase : List[Any]=5 , __lowercase : Union[str, Any]=4 , __lowercase : Any=37 , __lowercase : Optional[Any]="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : Tuple=0.1 , __lowercase : Dict=10 , __lowercase : int=0.02 , __lowercase : str="divided_space_time" , __lowercase : Union[str, Any]=None , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : int =parent
SCREAMING_SNAKE_CASE__ : List[str] =batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_size
SCREAMING_SNAKE_CASE__ : List[Any] =num_channels
SCREAMING_SNAKE_CASE__ : int =patch_size
SCREAMING_SNAKE_CASE__ : Tuple =num_frames
SCREAMING_SNAKE_CASE__ : List[Any] =is_training
SCREAMING_SNAKE_CASE__ : List[str] =use_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size
SCREAMING_SNAKE_CASE__ : str =num_hidden_layers
SCREAMING_SNAKE_CASE__ : int =num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] =hidden_act
SCREAMING_SNAKE_CASE__ : Dict =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] =attention_type
SCREAMING_SNAKE_CASE__ : Union[str, Any] =initializer_range
SCREAMING_SNAKE_CASE__ : Any =scope
SCREAMING_SNAKE_CASE__ : int =num_labels
        # in TimeSformer, the total number of tokens (sequence length) equals num_frames * num_patches per frame + 1 CLS token
SCREAMING_SNAKE_CASE__ : List[str] =(image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : str =(num_frames) * self.num_patches_per_frame + 1
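        # e.g. with the defaults above (image_size=10, patch_size=2, num_frames=2):
        # num_patches_per_frame = (10 // 2) ** 2 = 25 and seq_length = 2 * 25 + 1 = 51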
def __magic_name__ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[Any] =None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : int =self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : int ) -> Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
SCREAMING_SNAKE_CASE__ : List[Any] =self.num_labels
return config
def __magic_name__ ( self : int , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[Any] ) -> int:
SCREAMING_SNAKE_CASE__ : Tuple =TimesformerModel(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str =TimesformerForVideoClassification(__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase )
# verify the logits shape
SCREAMING_SNAKE_CASE__ : Tuple =torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __lowercase )
def __magic_name__ ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =config_and_inputs
SCREAMING_SNAKE_CASE__ : Any ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
snake_case_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
snake_case_ = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def __magic_name__ ( self : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Dict =TimesformerModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] =ConfigTester(
self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 )
def __magic_name__ ( self : str , __lowercase : Dict , __lowercase : str , __lowercase : Optional[int]=False ) -> int:
SCREAMING_SNAKE_CASE__ : str =copy.deepcopy(__lowercase )
if return_labels:
if model_class in get_values(__lowercase ):
SCREAMING_SNAKE_CASE__ : Any =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
return inputs_dict
def __magic_name__ ( self : List[Any] ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='''TimeSformer does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ) -> Optional[int]:
pass
def __magic_name__ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str =model_class(__lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Any =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) )
def __magic_name__ ( self : Any ) -> Any:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase )
SCREAMING_SNAKE_CASE__ : str =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : List[str] =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Dict =['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def __magic_name__ ( self : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__lowercase )
@slow
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> List[str]:
if not self.has_attentions:
pass
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] =True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.seq_length
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.num_frames
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
SCREAMING_SNAKE_CASE__ : str =False
SCREAMING_SNAKE_CASE__ : Tuple =True
SCREAMING_SNAKE_CASE__ : Dict =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE__ : List[Any] =True
SCREAMING_SNAKE_CASE__ : List[str] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE__ : Optional[int] =True
SCREAMING_SNAKE_CASE__ : Union[str, Any] =True
SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) )
self.assertEqual(out_len + 1 , len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : List[str] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def __magic_name__ ( self : Tuple ) -> List[Any]:
def check_hidden_states_output(__lowercase : Tuple , __lowercase : Dict , __lowercase : Optional[int] ):
SCREAMING_SNAKE_CASE__ : List[Any] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : int =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__lowercase ) , __lowercase )
SCREAMING_SNAKE_CASE__ : int =self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Tuple =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : List[str] =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''', filename='''eating_spaghetti.npy''', repo_type='''dataset''' )
SCREAMING_SNAKE_CASE__ : Any =np.load(UpperCamelCase__ )
return list(UpperCamelCase__ )
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : Any ) -> List[str]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : int =TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to(
__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =self.default_image_processor
SCREAMING_SNAKE_CASE__ : Tuple =prepare_video()
SCREAMING_SNAKE_CASE__ : Any =image_processor(video[:8] , return_tensors='''pt''' ).to(__lowercase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] =model(**__lowercase )
# verify the logits
SCREAMING_SNAKE_CASE__ : List[str] =torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , __lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 ) )
| 665 | 0 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 1_0_0

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
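# primes now holds exactly the primes below NUM_PRIMES (a plain Sieve of Eratosthenes).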
@lru_cache(maxsize=1_0_0 )
def partition( number_to_partition : int ) -> set[int]:
    '''Return the set of products over all ways to partition number_to_partition
    into a sum of primes (memoized via lru_cache).'''
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}
    ret: set[int] = set()
    prime: int
    sub: int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
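# By unique prime factorization, each product in partition(n) corresponds to exactly
# one multiset of primes summing to n, so len(partition(n)) counts the prime
# partitions of n. Example: partition(10) == {21, 25, 30, 32, 36}, i.e. 5 ways
# (7+3, 5+5, 5+3+2, 2+2+2+2+2, 3+3+2+2).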
def solution( number_unique_partitions : int = 5_0_0_0 ) -> int | None:
    '''Return the smallest number that can be written as a sum of primes in more
    than number_unique_partitions ways.'''
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition ) ) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F'''{solution() = }''')
| 703 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
a_ = {
'bert-base-uncased': 5_1_2,
'bert-large-uncased': 5_1_2,
'bert-base-cased': 5_1_2,
'bert-large-cased': 5_1_2,
'bert-base-multilingual-uncased': 5_1_2,
'bert-base-multilingual-cased': 5_1_2,
'bert-base-chinese': 5_1_2,
'bert-base-german-cased': 5_1_2,
'bert-large-uncased-whole-word-masking': 5_1_2,
'bert-large-cased-whole-word-masking': 5_1_2,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-base-cased-finetuned-mrpc': 5_1_2,
'bert-base-german-dbmdz-cased': 5_1_2,
'bert-base-german-dbmdz-uncased': 5_1_2,
'TurkuNLP/bert-base-finnish-cased-v1': 5_1_2,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_1_2,
'wietsedv/bert-base-dutch-cased': 5_1_2,
}
a_ = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_INIT_CONFIGURATION
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = BertTokenizer
    def __init__( self : int , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self : int , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self : Union[str, Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
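        # e.g. for a pair (A, B): [CLS] A [SEP] B [SEP] -> zeros over "[CLS] A [SEP]"
        # and ones over "B [SEP]", matching BERT's segment-embedding convention.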
    def save_vocabulary( self : List[Any] , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 665 | 0 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase ):
snake_case_ = """pixel_values"""
snake_case_ = False
snake_case_ = TimmBackboneConfig
def __init__( self : Tuple , __lowercase : Dict , **__lowercase : List[str] ) -> Any:
requires_backends(self , '''timm''' )
super().__init__(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ : str =config
if config.backbone is None:
raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' )
if config.backbone not in timm.list_models():
raise ValueError(F"backbone {config.backbone} is not supported by timm." )
if hasattr(UpperCamelCase_ , '''out_features''' ) and config.out_features is not None:
raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' )
SCREAMING_SNAKE_CASE__ : Optional[int] =getattr(UpperCamelCase_ , '''use_pretrained_backbone''' , UpperCamelCase_ )
if pretrained is None:
raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' )
# We just take the final layer by default. This matches the default for the transformers models.
SCREAMING_SNAKE_CASE__ : int =config.out_indices if getattr(UpperCamelCase_ , '''out_indices''' , UpperCamelCase_ ) is not None else (-1,)
SCREAMING_SNAKE_CASE__ : Any =timm.create_model(
config.backbone , pretrained=UpperCamelCase_ , features_only=config.features_only , in_chans=config.num_channels , out_indices=UpperCamelCase_ , **UpperCamelCase_ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
SCREAMING_SNAKE_CASE__ : Optional[int] =self._backbone.return_layers
        SCREAMING_SNAKE_CASE__ : Any ={layer['module']: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(UpperCamelCase_ )
@classmethod
def __magic_name__ ( cls : int , __lowercase : List[Any] , *__lowercase : str , **__lowercase : Dict ) -> List[Any]:
requires_backends(cls , ['''vision''', '''timm'''] )
from ...models.timm_backbone import TimmBackboneConfig
SCREAMING_SNAKE_CASE__ : Dict =kwargs.pop('''config''' , TimmBackboneConfig() )
SCREAMING_SNAKE_CASE__ : List[Any] =kwargs.pop('''use_timm_backbone''' , UpperCamelCase_ )
if not use_timm:
raise ValueError('''use_timm_backbone must be True for timm backbones''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =kwargs.pop('''num_channels''' , config.num_channels )
SCREAMING_SNAKE_CASE__ : Dict =kwargs.pop('''features_only''' , config.features_only )
SCREAMING_SNAKE_CASE__ : Optional[int] =kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone )
SCREAMING_SNAKE_CASE__ : Any =kwargs.pop('''out_indices''' , config.out_indices )
SCREAMING_SNAKE_CASE__ : Optional[int] =TimmBackboneConfig(
backbone=UpperCamelCase_ , num_channels=UpperCamelCase_ , features_only=UpperCamelCase_ , use_pretrained_backbone=UpperCamelCase_ , out_indices=UpperCamelCase_ , )
return super()._from_config(UpperCamelCase_ , **UpperCamelCase_ )
def __magic_name__ ( self : str , __lowercase : Union[str, Any] ) -> int:
pass
def __magic_name__ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Union[str, Any]=None , __lowercase : int=None , __lowercase : Dict=None , **__lowercase : Union[str, Any] ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE__ : List[Any] =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE__ : Optional[Any] =output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('''Cannot output attentions for timm backbones at the moment''' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
SCREAMING_SNAKE_CASE__ : Tuple =self._all_layers
SCREAMING_SNAKE_CASE__ : Any =self._backbone(UpperCamelCase_ , **UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ : List[Any] =self._return_layers
SCREAMING_SNAKE_CASE__ : Tuple =tuple(hidden_states[i] for i in self.out_indices )
else:
SCREAMING_SNAKE_CASE__ : List[Any] =self._backbone(UpperCamelCase_ , **UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ : Tuple =None
SCREAMING_SNAKE_CASE__ : Dict =tuple(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ : Tuple =tuple(UpperCamelCase_ ) if hidden_states is not None else None
if not return_dict:
SCREAMING_SNAKE_CASE__ : List[str] =(feature_maps,)
if output_hidden_states:
SCREAMING_SNAKE_CASE__ : Tuple =output + (hidden_states,)
return output
return BackboneOutput(feature_maps=UpperCamelCase_ , hidden_states=UpperCamelCase_ , attentions=UpperCamelCase_ )
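# A minimal usage sketch (assuming the upstream names TimmBackbone / TimmBackboneConfig
# and a locally cached timm checkpoint -- both assumptions, not guaranteed by this file):
#   config = TimmBackboneConfig(backbone='resnet18', use_pretrained_backbone=False)
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(pixel_values).feature_maps  # tuple of torch.Tensor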
| 704 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
a_ = False
a_ = False
def _a( UpperCamelCase__ : Namespace ):
'''simple docstring'''
return TrainCommand(UpperCamelCase__ )
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
@staticmethod
def __magic_name__ ( __lowercase : ArgumentParser ) -> Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' )
train_parser.add_argument(
'''--train_data''' , type=__lowercase , required=__lowercase , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=__lowercase , default=0 , help='''Column of the dataset csv file with example labels.''' )
train_parser.add_argument(
'''--column_text''' , type=__lowercase , default=1 , help='''Column of the dataset csv file with example texts.''' )
train_parser.add_argument(
'''--column_id''' , type=__lowercase , default=2 , help='''Column of the dataset csv file with example ids.''' )
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' )
train_parser.add_argument('''--validation_data''' , type=__lowercase , default='''''' , help='''path to validation dataset.''' )
train_parser.add_argument(
'''--validation_split''' , type=__lowercase , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
train_parser.add_argument('''--output''' , type=__lowercase , default='''./''' , help='''path to saved the trained model.''' )
train_parser.add_argument(
'''--task''' , type=__lowercase , default='''text_classification''' , help='''Task to train the model on.''' )
train_parser.add_argument(
'''--model''' , type=__lowercase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' )
train_parser.add_argument('''--train_batch_size''' , type=__lowercase , default=32 , help='''Batch size for training.''' )
train_parser.add_argument('''--valid_batch_size''' , type=__lowercase , default=64 , help='''Batch size for validation.''' )
train_parser.add_argument('''--learning_rate''' , type=__lowercase , default=3e-5 , help='''Learning rate.''' )
train_parser.add_argument('''--adam_epsilon''' , type=__lowercase , default=1e-08 , help='''Epsilon for Adam optimizer.''' )
train_parser.set_defaults(func=__lowercase )
def __init__( self : Tuple , __lowercase : Namespace ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Tuple =logging.get_logger('''transformers-cli/training''' )
SCREAMING_SNAKE_CASE__ : int ='''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output , exist_ok=__lowercase )
SCREAMING_SNAKE_CASE__ : Any =args.output
SCREAMING_SNAKE_CASE__ : str =args.column_label
SCREAMING_SNAKE_CASE__ : List[Any] =args.column_text
SCREAMING_SNAKE_CASE__ : Tuple =args.column_id
self.logger.info(F"Loading {args.task} pipeline for {args.model}" )
if args.task == "text_classification":
SCREAMING_SNAKE_CASE__ : List[str] =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"Loading dataset from {args.train_data}" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =None
if args.validation_data:
self.logger.info(F"Loading validation dataset from {args.validation_data}" )
SCREAMING_SNAKE_CASE__ : List[Any] =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =args.validation_split
SCREAMING_SNAKE_CASE__ : List[Any] =args.train_batch_size
SCREAMING_SNAKE_CASE__ : Any =args.valid_batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =args.learning_rate
SCREAMING_SNAKE_CASE__ : int =args.adam_epsilon
def __magic_name__ ( self : Any ) -> str:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __magic_name__ ( self : Optional[int] ) -> Tuple:
raise NotImplementedError
def __magic_name__ ( self : Dict ) -> List[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
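# Example invocation (assuming the standard transformers-cli entry point under which
# this command is registered; flags mirror the argparse definitions above):
#   transformers-cli train --train_data train.csv --column_label 0 --column_text 1 \
#     --output ./trained_model --train_batch_size 32 --learning_rate 3e-5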
| 665 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
snake_case_ = """ibert"""
def __init__( self : int , __lowercase : List[str]=3_05_22 , __lowercase : Optional[int]=7_68 , __lowercase : List[Any]=12 , __lowercase : str=12 , __lowercase : List[str]=30_72 , __lowercase : Dict="gelu" , __lowercase : Dict=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Any=5_12 , __lowercase : List[str]=2 , __lowercase : Union[str, Any]=0.02 , __lowercase : Any=1e-12 , __lowercase : int=1 , __lowercase : Optional[Any]=0 , __lowercase : int=2 , __lowercase : int="absolute" , __lowercase : Tuple=False , __lowercase : Dict="none" , **__lowercase : Tuple , ) -> List[str]:
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any =vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size
SCREAMING_SNAKE_CASE__ : Optional[int] =num_hidden_layers
SCREAMING_SNAKE_CASE__ : int =num_attention_heads
SCREAMING_SNAKE_CASE__ : List[str] =hidden_act
SCREAMING_SNAKE_CASE__ : List[str] =intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[int] =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Any =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple =max_position_embeddings
SCREAMING_SNAKE_CASE__ : Any =type_vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] =initializer_range
SCREAMING_SNAKE_CASE__ : Tuple =layer_norm_eps
SCREAMING_SNAKE_CASE__ : int =position_embedding_type
SCREAMING_SNAKE_CASE__ : List[str] =quant_mode
SCREAMING_SNAKE_CASE__ : int =force_dequant
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
@property
def __magic_name__ ( self : List[Any] ) -> List[str]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : Any ={0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE__ : Any ={0: "batch", 1: "sequence"}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
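# Note on the quantization knobs above: quant_mode switches I-BERT to integer-only
# arithmetic, and force_dequant can selectively dequantize specific nonlinear ops;
# the ONNX config only declares dynamic batch/sequence (or choice) axes for export.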
| 705 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = KandinskyVaaImgaImgPipeline
snake_case_ = ["""image_embeds""", """negative_image_embeds""", """image"""]
snake_case_ = [
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
snake_case_ = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
snake_case_ = False
@property
def __magic_name__ ( self : List[str] ) -> Tuple:
return 32
@property
def __magic_name__ ( self : List[str] ) -> str:
return 32
@property
def __magic_name__ ( self : Any ) -> Optional[int]:
return self.time_input_dim
@property
def __magic_name__ ( self : List[Any] ) -> int:
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Tuple ) -> Optional[int]:
return 1_00
@property
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE__ : Optional[int] =UNetaDConditionModel(**__lowercase )
return model
@property
def __magic_name__ ( self : Dict ) -> Any:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __magic_name__ ( self : Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] =VQModel(**self.dummy_movq_kwargs )
return model
def __magic_name__ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[str] =self.dummy_unet
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_movq
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''num_train_timesteps''': 10_00,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE__ : str =DDIMScheduler(**__lowercase )
SCREAMING_SNAKE_CASE__ : Any ={
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __magic_name__ ( self : str , __lowercase : Optional[Any] , __lowercase : Any=0 ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowercase )
# create init_image
SCREAMING_SNAKE_CASE__ : Optional[Any] =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ : Any =Image.fromarray(np.uinta(__lowercase ) ).convert('''RGB''' ).resize((2_56, 2_56) )
if str(__lowercase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ : Dict =torch.manual_seed(__lowercase )
else:
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
SCREAMING_SNAKE_CASE__ : str ={
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __magic_name__ ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] ='''cpu'''
SCREAMING_SNAKE_CASE__ : Tuple =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =output.images
SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipe(
**self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : List[str] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : Tuple =np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : int ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : str =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE__ : List[Any] ='''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE__ : Optional[int] =KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =KandinskyVaaImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Dict =pipeline.to(__lowercase )
pipeline.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =pipe_prior(
__lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ : int =output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__lowercase , __lowercase )
| 665 | 0 |
'''simple docstring'''
import numpy as np
from PIL import Image
def maxpooling( arr : np.ndarray , size : int , stride : int ) -> np.ndarray:
    '''Slide a size x size window over a square matrix with the given stride and
    keep the maximum of each window.'''
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''' )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
        j = 0
        mat_j = 0
return updated_arr
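# Example: a 4x4 input with size=2, stride=2 yields a 2x2 output, e.g.
# maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
# -> [[ 6.,  8.], [14., 16.]]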
def avgpooling( arr : np.ndarray , size : int , stride : int ) -> np.ndarray:
    '''Slide a size x size window over a square matrix with the given stride and
    keep the (truncated) average of each window.'''
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''' )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
        j = 0
        mat_j = 0
return updated_arr
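# Example: on the same 4x4 input, avgpooling(..., size=2, stride=2) gives
# [[ 3.,  5.], [11., 13.]] (each window average is truncated by int()).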
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
image = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 706 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('T')
class LRUCache( Generic[T] ):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache
    def __init__( self : Dict , n : int ) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('''n should be an integer greater than 0.''' )
        else:
            LRUCache._MAX_CAPACITY = n
    def refer( self : List[str] , x : T ) -> None:
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                # evict the least recently used key from the right end of the deque
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )
    def display( self : Union[str, Any] ) -> None:
        # print the cache contents from most to least recently used
        for k in self.dq_store:
            print(k )
def __repr__( self : List[Any] ) -> str:
return F"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"
if __name__ == "__main__":
import doctest
doctest.testmod()
lru_cache = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 665 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
a_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCAmelCase_ ):
    def __init__( self : Optional[Any] , *args : Any , **kwargs : Dict ) -> None:
        warnings.warn(
            '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use SegformerImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
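# Downstream code should construct SegformerImageProcessor directly; this subclass only
# exists so that old imports keep working until the deprecation is enforced.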
| 707 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve( matrix : Matrix, vector : Matrix ) -> Matrix:
    '''Solve the square linear system matrix * x = vector with Gaussian elimination
    (partial pivoting) followed by back substitution.'''
    size: int = len(matrix )
    augmented: Matrix = [[0 for _ in range(size + 1 )] for _ in range(size )]
    row: int
    rowa: int
    col: int
    cola: int
    pivot_row: int
    ratio: float
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
while row < size and col < size:
# pivoting
        pivot_row = max((abs(augmented[rowa][col] ), rowa) for rowa in range(row, size ) )[
            1
        ]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for rowa in range(row + 1, size ):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
for cola in range(col + 1, size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
    for col in range(1, size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
        [round(augmented[row][size] / augmented[row][row], 1_0 )] for row in range(size )
]
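# Sanity example: solve([[1, 0], [0, 1]], [[2], [3]]) returns [[2.0], [3.0]].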
def interpolate( y_points : list[int] ) -> Callable[[int], int]:
    '''Return the minimal-degree polynomial passing through the points
    (1, y_points[0]), (2, y_points[1]), ...'''
    size: int = len(y_points )
    matrix: Matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector: Matrix = [[0] for _ in range(size )]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_points ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector )
    def interpolated_func( var : int ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )
return interpolated_func
def question_function( variable : int ) -> int:
    '''u(n) = 1 - n + n**2 - n**3 + ... - n**9 + n**10.'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**1_0
)
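# For positive integers n, u(n) == (n**11 + 1) // (n + 1), since the alternating sum
# above is a geometric series with ratio -n.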
def solution( func : Callable[[int], int] = question_function, order : int = 1_0 ) -> int:
    '''Sum the first incorrect terms of the optimum polynomials fitted to successive
    prefixes of the sequence func(1), func(2), ...'''
    data_points: list[int] = [func(x_val ) for x_val in range(1, order + 1 )]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1, order + 1 )
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 665 | 0 |
"""OpenAI ImageGPT configuration."""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, TensorType

logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for the start-of-sequence token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        # Generate dummy images and run them through the preprocessor to build
        # framework-specific dummy inputs for ONNX export.
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
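# Usage sketch (public API names inferred from the "imagegpt" model type;
# the small hyperparameters below are illustrative, not required):
#
#     from transformers import ImageGPTConfig, ImageGPTModel
#
#     config = ImageGPTConfig(n_layer=2, n_head=4, n_embd=64)
#     model = ImageGPTModel(config)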
"""Interpolation search over an ascending sorted collection."""


def interpolation_search(sorted_collection, item):
    """Iterative interpolation search; returns the index of ``item`` or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid division by zero during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        if point < left:
            right = left
            left = point
        elif point > right:
            left = right
            right = point
        elif item < current_item:
            right = point - 1
        else:
            left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Pure recursive counterpart of ``interpolation_search``."""
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    elif sorted_collection[point] > item:
        return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
    else:
        return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    """Raise ValueError if ``collection`` is not sorted in ascending order."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at position: {result}")
    else:
        print("Not found")
"""Tests for TFBertTokenizer, the in-graph TensorFlow Bert tokenizer."""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_tensorflow_text_available():
    from transformers.models.bert import TFBertTokenizer


TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"

if is_tf_available():

    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]


@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled,
                # so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
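# Minimal end-to-end sketch of the tokenizer under test (assumes TF and
# tensorflow-text are installed; the checkpoint name is one the tests above
# already use):
#
#     tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
#     batch = tf.constant(["hello world"])
#     tokens = tf_tokenizer(batch)            # dict of int64 tensors
#     serving_fn = tf.function(tf_tokenizer)  # usable inside a SavedModel graph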
"""Tests for the zero-shot image classification pipeline."""
import unittest

from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation
        # and the order is not guaranteed across python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
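# Minimal usage sketch of the pipeline under test (checkpoint and labels are
# the same ones the slow tests above rely on; the image path is illustrative):
#
#     classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     preds = classifier("cats.png", candidate_labels=["cat", "plane", "remote"])
#     # -> list of {"score": float, "label": str}, sorted by score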
"""Tests for the UnCLIP scheduler."""
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
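# Sketch of the denoising loop the full-loop tests exercise. `model` stands in
# for any epsilon-predicting network; names and shapes are illustrative only:
#
#     scheduler = UnCLIPScheduler(num_train_timesteps=1000)
#     sample = torch.randn(1, 3, 32, 32)
#     for t in scheduler.timesteps:
#         residual = model(sample, t)
#         sample = scheduler.step(residual, t, sample).prev_sample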
"""Tests for the Jukebox tokenizer."""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
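# Minimal usage sketch mirroring the tests above (downloads the released
# 1b-lyrics tokenizer; lyrics string is illustrative):
#
#     tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
#     tokens = tokenizer(artist="Zac Brown Band", genres="Country", lyrics="...")["input_ids"]
#     # -> one tensor of token ids per prior level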
"""Configuration for backbones loaded from the timm library."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
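# Usage sketch (assumes the timm library is installed; "resnet50" is an
# illustrative timm model name):
#
#     from transformers import TimmBackbone, TimmBackboneConfig
#
#     config = TimmBackboneConfig(backbone="resnet50", out_indices=(2, 3, 4))
#     backbone = TimmBackbone(config)
#     features = backbone(pixel_values).feature_maps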
"""GPT-NeoX model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
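# RoPE-scaling usage sketch (values illustrative): the validation above accepts
# a two-field dict selecting linear or dynamic NTK scaling of the rotary
# position embeddings.
#
#     from transformers import GPTNeoXConfig
#
#     config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
#     # {"type": "dynamic", "factor": 2.0} selects dynamic scaling instead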
"""Tests for CLIPSegProcessor."""
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPSegProcessor, ViTImageProcessor


@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
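# Usage sketch of the processor under test (the checkpoint name is the
# released CLIPSeg model, assumed here for illustration):
#
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     inputs = processor(text=["a cat"], images=image, return_tensors="pt")
#     # inputs carries input_ids, attention_mask and pixel_values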
"""DeBERTa lazy-import module structure."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
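# How the lazy structure behaves in practice (standard transformers pattern):
# symbols resolve on first attribute access, so
#
#     from transformers.models.deberta import DebertaModel
#
# only imports the torch-dependent module at that point, not at package
# import time.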
"""Vision-encoder-decoder lazy-import module structure."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Jacobi iteration method for a strictly diagonally dominant linear system Ax = b."""
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Iteratively refine ``init_val`` towards the solution of the system."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise ValueError unless every diagonal entry dominates the rest of its row."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
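# Worked example, three Jacobi sweeps on a strictly diagonally dominant system:
#
#     >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]])
#     >>> constant = np.array([[2], [-6], [-4]])
#     >>> jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], 3)
#     [0.909375, -1.14375, -0.7484375]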
"""Image processor for video models (the class name follows the VideoMAE processor this file mirrors)."""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    """Normalize the input into a batch of videos (a list of lists of frames)."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
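# Usage sketch (`video` is assumed to be a list of PIL frames; the output
# shape reflects one video batched by make_batched):
#
#     processor = VideoMAEImageProcessor()
#     batch = processor(video, return_tensors="np")
#     batch["pixel_values"].shape  # (1, num_frames, 3, 224, 224)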
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Convert an original mLUKE checkpoint into a Hugging Face LukeForMaskedLM checkpoint."""
    # Load the configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2
    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)
    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens from those of '@' and '#'
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])
    model = LukeForMaskedLM(config=config).eval()
    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]
    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)
    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}")
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}")
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    """Read the original entity vocab (one JSON object per line) into a flat name -> id mapping."""
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
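# Example invocation (illustrative only; all paths are placeholders, and the
# entity vocab file is read as one JSON object per line despite the .tsv name
# used in the help text):
#
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path /path/to/pytorch_model.bin \
#       --metadata_path /path/to/metadata.json \
#       --entity_vocab_path /path/to/entity_vocab.tsv \
#       --pytorch_dump_folder_path ./mluke-base \
#       --model_size base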
| 665 | 0 |
'''simple docstring'''
a_ = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
a_ = [{"type": "code", "content": INSTALL_CONTENT}]
a_ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 715 |
'''simple docstring'''
def abbr(a: str, b: str) -> bool:
    """
    Determine whether string `b` can be obtained from string `a` by capitalizing
    some of its lowercase letters and deleting all remaining lowercase letters.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 0 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
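# For example, get_pairs(("h", "e", "l", "l", "o</w>")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")}.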
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        # Normalize punctuation and whitespace before applying BPE
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")
        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                # Merge the lowest-ranked (most frequent) pair first
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        """Split a string into BPE tokens."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id using the vocab."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a sequence of tokens back into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
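# A minimal usage sketch (assumes network access to the pretrained files under
# the model id used below):
#
#   tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#   ids = tokenizer("sample text")["input_ids"]
#   print(tokenizer.convert_ids_to_tokens(ids))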
| 716 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """Build a long tensor on the default test device."""
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        # Compare the first 3x3 block of the output against the reference values element-wise
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
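# To actually exercise the integration check above, something like the following
# is needed; the checkpoint directory layout and pytest selector are assumptions,
# since the 345m checkpoint is not hosted publicly:
#
#   MYDIR=/path/to/checkpoints RUN_SLOW=1 python -m pytest -k MegatronBertModelIntegrationTests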
| 665 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
        image = Image.open(ds[0]["file"]).convert("RGB")
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE__ : Dict =[["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
SCREAMING_SNAKE_CASE__ : Dict =[[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 
6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __lowerCamelCase )
self.assertListEqual(encoding.boxes , __lowerCamelCase )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
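# Note: with apply_ocr=False the processor returns only pixel_values, and the
# words/boxes are supplied by the caller instead. A hedged sketch of that
# workflow (the keyword names follow the LayoutLMv3 tokenizer API):
#
#   words = ["hello", "world"]
#   boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
#   encoding = tokenizer(words, boxes=boxes, return_tensors="pt")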
| 717 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"\n        {self.test_dir}/xla_spawn.py\n        --num_cores 8\n        {self.test_file_path}\n    ".split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
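# The test above is equivalent to launching the script by hand on a TPU host
# (both paths below are placeholders):
#
#   python /path/to/accelerate/test_utils/xla_spawn.py --num_cores 8 /path/to/scripts/test_script.py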
| 665 | 0 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of ``func`` (an expression in ``x``) starting from the point ``a``,
    using the Newton-Raphson iteration x_{n+1} = x_n - f(x_n) / f'(x_n)."""
    x = a
    while True:
        # eval() evaluates the expression at the current value of ``x``;
        # sympy.diff() supplies the symbolic derivative f'(x)
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
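# For instance (illustrative), the square root of 5 is the positive root of x**2 - 5:
#
#   newton_raphson("x**2 - 5", 0.1)  # ~2.23606797749979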
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
    # Find root of log(x) - 1 = 0 (i.e. x = e)
print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
| 718 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImgaImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        model = CLIPVisionModel(config)
        return model
    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
        return image_processor
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy" )
        pipe = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 665 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
a_ = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 719 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
def __init__( self : Any , __lowercase : Any=5_02_57 , __lowercase : int=10_24 , __lowercase : List[str]=7_68 , __lowercase : Optional[int]=12 , __lowercase : Dict=12 , __lowercase : List[str]=None , __lowercase : int="gelu_pytorch_tanh" , __lowercase : Union[str, Any]=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Optional[Any]=1e-5 , __lowercase : List[str]=0.02 , __lowercase : Tuple=True , __lowercase : Optional[Any]=True , __lowercase : Union[str, Any]=5_02_56 , __lowercase : List[Any]=5_02_56 , __lowercase : Union[str, Any]=True , __lowercase : List[str]=True , __lowercase : Dict=True , **__lowercase : List[Any] , ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =n_positions
SCREAMING_SNAKE_CASE__ : Dict =n_embd
SCREAMING_SNAKE_CASE__ : Dict =n_layer
SCREAMING_SNAKE_CASE__ : Union[str, Any] =n_head
SCREAMING_SNAKE_CASE__ : List[str] =n_inner
SCREAMING_SNAKE_CASE__ : List[str] =activation_function
SCREAMING_SNAKE_CASE__ : List[Any] =resid_pdrop
SCREAMING_SNAKE_CASE__ : List[Any] =embd_pdrop
SCREAMING_SNAKE_CASE__ : List[str] =attn_pdrop
SCREAMING_SNAKE_CASE__ : Dict =layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : List[str] =initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] =scale_attn_weights
SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_cache
SCREAMING_SNAKE_CASE__ : Dict =attention_softmax_in_fpaa
SCREAMING_SNAKE_CASE__ : int =scale_attention_softmax_in_fpaa
SCREAMING_SNAKE_CASE__ : Dict =multi_query
SCREAMING_SNAKE_CASE__ : Optional[Any] =bos_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] =eos_token_id
super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
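# Illustrative usage (added for clarity, not part of the upstream config file):
# the `attribute_map` above lets generic code read `hidden_size` even though the
# value is stored under `n_embd`.
#
#   config = GPTBigCodeConfig(n_embd=2048, n_layer=24, multi_query=True)
#   assert config.hidden_size == config.n_embd == 2048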
| 665 | 0 |
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    # Count how many argmax predictions match the labels.
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples (story, 1st continuation, 2nd continuation, label).

    Each example becomes two candidate sequences of the form
    [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
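# Shape reference for the arrays built above (restated from the code for readability):
#   input_ids    (n_batch, 2, input_len)   two candidate sequences per story
#   mc_token_ids (n_batch, 2)              position of the classification token in each candidate
#   lm_labels    (n_batch, 2, input_len)   -100 marks positions excluded from the LM loss
#   mc_labels    (n_batch,)                index (0 or 1) of the correct continuation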
def main():
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict =argparse.ArgumentParser()
parser.add_argument('''--model_name''', type=UpperCamelCase__, default='''openai-gpt''', help='''pretrained model name''' )
parser.add_argument('''--do_train''', action='''store_true''', help='''Whether to run training.''' )
parser.add_argument('''--do_eval''', action='''store_true''', help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''', default=UpperCamelCase__, type=UpperCamelCase__, required=UpperCamelCase__, help='''The output directory where the model predictions and checkpoints will be written.''', )
parser.add_argument('''--train_dataset''', type=UpperCamelCase__, default='''''' )
parser.add_argument('''--eval_dataset''', type=UpperCamelCase__, default='''''' )
parser.add_argument('''--seed''', type=UpperCamelCase__, default=4_2 )
parser.add_argument('''--num_train_epochs''', type=UpperCamelCase__, default=3 )
parser.add_argument('''--train_batch_size''', type=UpperCamelCase__, default=8 )
parser.add_argument('''--eval_batch_size''', type=UpperCamelCase__, default=1_6 )
parser.add_argument('''--adam_epsilon''', default=1e-8, type=UpperCamelCase__, help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''', type=UpperCamelCase__, default=1 )
parser.add_argument(
'''--max_steps''', default=-1, type=UpperCamelCase__, help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
), )
parser.add_argument(
'''--gradient_accumulation_steps''', type=UpperCamelCase__, default=1, help='''Number of updates steps to accumulate before performing a backward/update pass.''', )
parser.add_argument('''--learning_rate''', type=UpperCamelCase__, default=6.25e-5 )
parser.add_argument('''--warmup_steps''', default=0, type=UpperCamelCase__, help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''', type=UpperCamelCase__, default='''warmup_linear''' )
parser.add_argument('''--weight_decay''', type=UpperCamelCase__, default=0.0_1 )
parser.add_argument('''--lm_coef''', type=UpperCamelCase__, default=0.9 )
parser.add_argument('''--n_valid''', type=UpperCamelCase__, default=3_7_4 )
parser.add_argument('''--server_ip''', type=UpperCamelCase__, default='''''', help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''', type=UpperCamelCase__, default='''''', help='''Can be used for distant debugging.''' )
SCREAMING_SNAKE_CASE__ : int =parser.parse_args()
print(UpperCamelCase__ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=UpperCamelCase__ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
SCREAMING_SNAKE_CASE__ : str =torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(UpperCamelCase__, UpperCamelCase__ ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
SCREAMING_SNAKE_CASE__ : Tuple =['''_start_''', '''_delimiter_''', '''_classify_''']
SCREAMING_SNAKE_CASE__ : Dict =OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : int =OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(UpperCamelCase__ ) )
model.to(UpperCamelCase__ )
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object (a string, an int, or nested lists thereof)."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info('''Encoding dataset...''' )
SCREAMING_SNAKE_CASE__ : Optional[int] =load_rocstories_dataset(args.train_dataset )
SCREAMING_SNAKE_CASE__ : List[str] =load_rocstories_dataset(args.eval_dataset )
SCREAMING_SNAKE_CASE__ : Optional[Any] =(train_dataset, eval_dataset)
SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenize_and_encode(UpperCamelCase__ )
# Compute the max input length for the Transformer
SCREAMING_SNAKE_CASE__ : Any =model.config.n_positions // 2 - 2
SCREAMING_SNAKE_CASE__ : int =max(
len(story[:max_length] ) + max(len(conta[:max_length] ), len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =min(UpperCamelCase__, model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
SCREAMING_SNAKE_CASE__ : Dict =pre_process_datasets(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, *UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =tensor_datasets[0], tensor_datasets[1]
SCREAMING_SNAKE_CASE__ : Tuple =TensorDataset(*UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Dict =RandomSampler(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Dict =DataLoader(UpperCamelCase__, sampler=UpperCamelCase__, batch_size=args.train_batch_size )
SCREAMING_SNAKE_CASE__ : Dict =TensorDataset(*UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple =SequentialSampler(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] =DataLoader(UpperCamelCase__, sampler=UpperCamelCase__, batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
SCREAMING_SNAKE_CASE__ : Optional[int] =args.max_steps
SCREAMING_SNAKE_CASE__ : List[str] =args.max_steps // (len(UpperCamelCase__ ) // args.gradient_accumulation_steps) + 1
else:
SCREAMING_SNAKE_CASE__ : List[str] =len(UpperCamelCase__ ) // args.gradient_accumulation_steps * args.num_train_epochs
SCREAMING_SNAKE_CASE__ : Optional[Any] =list(model.named_parameters() )
SCREAMING_SNAKE_CASE__ : Optional[int] =['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =AdamW(UpperCamelCase__, lr=args.learning_rate, eps=args.adam_epsilon )
SCREAMING_SNAKE_CASE__ : Dict =get_linear_schedule_with_warmup(
UpperCamelCase__, num_warmup_steps=args.warmup_steps, num_training_steps=UpperCamelCase__ )
if args.do_train:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ), desc='''Epoch''' ):
SCREAMING_SNAKE_CASE__ : List[Any] =0
SCREAMING_SNAKE_CASE__ : Dict =0
SCREAMING_SNAKE_CASE__ : Dict =tqdm(UpperCamelCase__, desc='''Training''' )
for step, batch in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : List[str] =tuple(t.to(UpperCamelCase__ ) for t in batch )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =batch
SCREAMING_SNAKE_CASE__ : Tuple =model(UpperCamelCase__, mc_token_ids=UpperCamelCase__, lm_labels=UpperCamelCase__, mc_labels=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[str] =args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
SCREAMING_SNAKE_CASE__ : Any =(
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
SCREAMING_SNAKE_CASE__ : Optional[Any] ='''Training loss: {:.2e} lr: {:.2e}'''.format(UpperCamelCase__, scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
SCREAMING_SNAKE_CASE__ : Tuple =model.module if hasattr(UpperCamelCase__, '''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
SCREAMING_SNAKE_CASE__ : Optional[int] =os.path.join(args.output_dir, UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any =os.path.join(args.output_dir, UpperCamelCase__ )
torch.save(model_to_save.state_dict(), UpperCamelCase__ )
model_to_save.config.to_json_file(UpperCamelCase__ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
SCREAMING_SNAKE_CASE__ : List[Any] =OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
SCREAMING_SNAKE_CASE__ : List[str] =OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(UpperCamelCase__ )
if args.do_eval:
model.eval()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =0, 0
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =0, 0
for batch in tqdm(UpperCamelCase__, desc='''Evaluating''' ):
SCREAMING_SNAKE_CASE__ : Any =tuple(t.to(UpperCamelCase__ ) for t in batch )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =batch
with torch.no_grad():
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =model(
UpperCamelCase__, mc_token_ids=UpperCamelCase__, lm_labels=UpperCamelCase__, mc_labels=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Dict =mc_logits.detach().cpu().numpy()
SCREAMING_SNAKE_CASE__ : List[str] =mc_labels.to('''cpu''' ).numpy()
SCREAMING_SNAKE_CASE__ : Tuple =accuracy(UpperCamelCase__, UpperCamelCase__ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
SCREAMING_SNAKE_CASE__ : Tuple =eval_loss / nb_eval_steps
SCREAMING_SNAKE_CASE__ : Dict =eval_accuracy / nb_eval_examples
SCREAMING_SNAKE_CASE__ : int =tr_loss / nb_tr_steps if args.do_train else None
SCREAMING_SNAKE_CASE__ : int ={'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
SCREAMING_SNAKE_CASE__ : List[Any] =os.path.join(args.output_dir, '''eval_results.txt''' )
with open(UpperCamelCase__, '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''', UpperCamelCase__, str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
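# Example invocation (paths are placeholders; point them at your ROCStories CSVs):
#   python run_openai_gpt.py \
#     --model_name openai-gpt \
#     --do_train --do_eval \
#     --train_dataset "$ROC_STORIES_DIR/cloze_test_val__spring2016.csv" \
#     --eval_dataset "$ROC_STORIES_DIR/cloze_test_test__spring2016.csv" \
#     --output_dir ../log \
#     --train_batch_size 16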
| 720 |
"""Fenwick (binary indexed) tree specialised for range-maximum queries."""


class MaxFenwickTree:
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size   # raw values
        self.tree = [0] * size  # tree[i] covers arr[get_prev(i) + 1 .. i]

    @staticmethod
    def get_next(index: int) -> int:
        # Next node whose range also covers `index`: set the lowest unset bit.
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        # One position to the left of the range covered by `index`.
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """Set arr[index] = value, refreshing every node that covers it."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:  # leaf node covers only itself
                self.tree[index] = value
            else:
                # Recompute this node's maximum over the range it covers.
                self.tree[index] = max(self.arr[index], self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Maximum of arr[left:right]."""
        right -= 1  # because `right` is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
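    # Usage sketch (illustrative, not from the original file); query(left, right)
    # treats `right` as exclusive, mirroring Python slicing.
    ft = MaxFenwickTree(5)
    ft.update(2, 10)
    ft.update(4, 3)
    assert ft.query(0, 3) == 10  # max over indices 0..2
    assert ft.query(3, 5) == 3   # max over indices 3..4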
| 665 | 0 |
def is_pentagonal(n: int) -> bool:
    """True if `n` is pentagonal, i.e. n = k(3k - 1)/2 for some positive integer k."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Project Euler 44: find pentagonal numbers whose sum and difference are
    both pentagonal, and return the difference."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(F'''{solution() = }''')
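    # Sanity check against known values: P(1..5) = 1, 5, 12, 22, 35, so
    # is_pentagonal(22) is True while is_pentagonal(23) is False.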
| 721 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]
def __init__( self : int , __lowercase : AutoencoderKL , __lowercase : UNetaDConditionModel , __lowercase : Mel , __lowercase : Union[DDIMScheduler, DDPMScheduler] , ) -> int:
super().__init__()
self.register_modules(unet=__lowercase , scheduler=__lowercase , mel=__lowercase , vqvae=__lowercase )
    def get_default_steps(self) -> int:
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
def __call__( self : Dict , __lowercase : int = 1 , __lowercase : str = None , __lowercase : np.ndarray = None , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = None , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : float = 0 , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : torch.Tensor = None , __lowercase : torch.Tensor = None , __lowercase : Dict=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =steps or self.get_default_steps()
self.scheduler.set_timesteps(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
SCREAMING_SNAKE_CASE__ : Optional[int] =randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__lowercase , device=self.device , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =noise
SCREAMING_SNAKE_CASE__ : List[str] =None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Dict =self.mel.audio_slice_to_image(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
SCREAMING_SNAKE_CASE__ : int =(input_image / 2_55) * 2 - 1
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.vqvae.encode(torch.unsqueeze(__lowercase , 0 ) ).latent_dist.sample(
generator=__lowercase )[0]
SCREAMING_SNAKE_CASE__ : int =self.vqvae.config.scaling_factor * input_images
if start_step > 0:
SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , self.scheduler.timesteps[start_step - 1] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
SCREAMING_SNAKE_CASE__ : Optional[Any] =int(mask_start_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : Tuple =int(mask_end_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __lowercase ):
SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase , __lowercase )['''sample''']
else:
SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase )['''sample''']
if isinstance(self.scheduler , __lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.scheduler.step(
model_output=__lowercase , timestep=__lowercase , sample=__lowercase , eta=__lowercase , generator=__lowercase , )['''prev_sample''']
else:
SCREAMING_SNAKE_CASE__ : Any =self.scheduler.step(
model_output=__lowercase , timestep=__lowercase , sample=__lowercase , generator=__lowercase , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] =mask[:, step, :, :mask_start]
if mask_end > 0:
SCREAMING_SNAKE_CASE__ : List[str] =mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
SCREAMING_SNAKE_CASE__ : str =1 / self.vqvae.config.scaling_factor * images
SCREAMING_SNAKE_CASE__ : int =self.vqvae.decode(__lowercase )['''sample''']
SCREAMING_SNAKE_CASE__ : List[str] =(images / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
SCREAMING_SNAKE_CASE__ : Dict =(images * 2_55).round().astype('''uint8''' )
SCREAMING_SNAKE_CASE__ : Any =list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ , mode='''RGB''' ).convert('''L''' ) for _ in images) )
        SCREAMING_SNAKE_CASE__ : Optional[int] =[self.mel.image_to_audio(_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowercase ) )
@torch.no_grad()
def __magic_name__ ( self : Optional[int] , __lowercase : List[Image.Image] , __lowercase : int = 50 ) -> np.ndarray:
assert isinstance(self.scheduler , __lowercase )
self.scheduler.set_timesteps(__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
SCREAMING_SNAKE_CASE__ : str =(sample / 2_55) * 2 - 1
SCREAMING_SNAKE_CASE__ : str =torch.Tensor(__lowercase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
SCREAMING_SNAKE_CASE__ : Tuple =t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
SCREAMING_SNAKE_CASE__ : Any =self.scheduler.alphas_cumprod[t]
SCREAMING_SNAKE_CASE__ : int =(
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
SCREAMING_SNAKE_CASE__ : int =1 - alpha_prod_t
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.unet(__lowercase , __lowercase )['''sample''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(1 - alpha_prod_t_prev) ** 0.5 * model_output
SCREAMING_SNAKE_CASE__ : str =(sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
SCREAMING_SNAKE_CASE__ : Any =sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
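    # Illustrative use of the static method above: blend two noise tensors along
    # the unit hypersphere rather than linearly, e.g.
    #   mixed = AudioDiffusionPipeline.slerp(noise_a, noise_b, alpha=0.5)
    # alpha = 0 returns x0 and alpha = 1 returns x1.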
| 665 | 0 |
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None
@classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()
@staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break
        if short_word is None:
            # Paranoid fallback: append a letter-encoded integer until unique.
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    i += 1
                    continue
                else:
                    short_word = sword
                    break
        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word
@staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name
@staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name
@classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info
@classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            name.append(f"{key}{sep}{v}")
        return "_".join(name)
@classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
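# Usage sketch mirroring the upstream transformers.utils.hp_naming.TrialShortNamer
# API (the hyperparameter values below are made up):
#
#   class RunNamer(TrialShortNamer):
#       PREFIX = "run"
#       DEFAULTS = {"learning_rate": 3e-5, "batch_size": 8}
#
#   RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 8})  # -> "run_lr0.0001"
#   RunNamer.parse_repr("run_lr0.0001")  # -> {"learning_rate": 0.0001, "batch_size": 8}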
| 700 |
"""Project Euler 187: count the composites below 10**8 that have exactly two,
not necessarily distinct, prime factors (semiprimes)."""
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes below `max_number`."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count semiprimes p * q < max_number with a two-pointer sweep over the primes."""
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
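    # Worked mini-example (not part of the original file): for max_number = 30 the
    # numbers below 30 with exactly two prime factors are
    # 4, 6, 9, 10, 14, 15, 21, 22, 25, 26, so solution(30) == 10.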
| 665 | 0 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'openbmb/cpm-ant-10b': 1024,
}
def load_vocab(vocab_file):
    """Load a one-token-per-line vocabulary file into an ordered dict."""
    vocab = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip('\n')
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    """Greedy longest-match-first tokenizer over a fixed vocabulary."""

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Shrink the window from the right until a vocab entry matches.
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
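# Greedy longest-match-first illustration (hypothetical four-entry vocabulary):
#   vocab = {"今天": 0, "天气": 1, "天": 2, "气": 3}
#   WordpieceTokenizer(vocab).tokenize("今天天气")  # -> ["今天", "天气"]
# At each position the longest substring found in the vocabulary wins; characters
# matching nothing are emitted as the unk token.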
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False
    def __init__(self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>",
                 pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>",
                 padding_side="left", **kwargs):
        requires_backends(self, ["jieba"])
        super().__init__(bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token,
                         pad_token=pad_token, unk_token=unk_token, line_token=line_token,
                         space_token=space_token, padding_side=padding_side, **kwargs)
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
@property
    def bod_token_id(self):
return self.encoder[self.bod_token]
@property
    def eod_token_id(self):
return self.encoder[self.eod_token]
@property
    def newline_id(self):
return self.encoder["\n"]
@property
    def vocab_size(self):
return len(self.encoder )
    def get_vocab(self):
return dict(self.encoder , **self.added_tokens_encoder )
    def _tokenize(self, text):
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens
    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)
    def check(self, token):
return token in self.encoder
    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # Restore the special space/newline entries before writing the file.
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 701 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB)
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
SCREAMING_SNAKE_CASE__ : Optional[Any] ='''this is a test'''
SCREAMING_SNAKE_CASE__ : int ='''this is a test'''
return input_text, output_text
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)
    def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
    def test_add_tokens_tokenizer(self):
SCREAMING_SNAKE_CASE__ : str =self.get_tokenizers(do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Any =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
SCREAMING_SNAKE_CASE__ : int =['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.add_tokens(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , len(__lowercase ) )
self.assertEqual(__lowercase , all_size + len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__lowercase )
self.assertGreaterEqual(len(__lowercase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
SCREAMING_SNAKE_CASE__ : str ={'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
SCREAMING_SNAKE_CASE__ : int =tokenizer.add_special_tokens(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : int =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , len(__lowercase ) )
self.assertEqual(__lowercase , all_size_a + len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__lowercase )
self.assertGreaterEqual(len(__lowercase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
    def test_pickle_subword_regularization_tokenizer(self):
pass
    def test_subword_regularization_tokenizer(self):
pass
    def test_full_tokenizer(self):
SCREAMING_SNAKE_CASE__ : Dict =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.tokenize('''This is a test''' )
# fmt: off
self.assertListEqual(__lowercase , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(__lowercase )
# fmt: off
self.assertListEqual(__lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.convert_ids_to_tokens(__lowercase )
self.assertListEqual(
__lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
    def test_tokenizer_integration(self):
# Use custom sequence because this tokenizer does not handle numbers.
SCREAMING_SNAKE_CASE__ : List[Any] =[
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
SCREAMING_SNAKE_CASE__ : str ={
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__lowercase , )
| 665 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
def __init__( self : Any , __lowercase : Tuple , __lowercase : Union[str, Any]=13 , __lowercase : Any=64 , __lowercase : Any=3 , __lowercase : str=4 , __lowercase : Union[str, Any]=[2, 2, 2, 2] , __lowercase : Optional[Any]=[8, 4, 2, 1] , __lowercase : List[str]=[16, 32, 64, 1_28] , __lowercase : Tuple=[1, 4, 8, 16] , __lowercase : List[Any]=[1, 2, 4, 8] , __lowercase : List[str]=True , __lowercase : List[Any]=True , __lowercase : List[str]="gelu" , __lowercase : List[Any]=0.1 , __lowercase : Optional[Any]=0.1 , __lowercase : Dict=0.02 , __lowercase : Any=3 , __lowercase : Optional[Any]=None , ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Optional[int] =parent
SCREAMING_SNAKE_CASE__ : Union[str, Any] =batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_size
SCREAMING_SNAKE_CASE__ : str =num_channels
SCREAMING_SNAKE_CASE__ : str =num_encoder_blocks
SCREAMING_SNAKE_CASE__ : int =sr_ratios
SCREAMING_SNAKE_CASE__ : str =depths
SCREAMING_SNAKE_CASE__ : Union[str, Any] =hidden_sizes
SCREAMING_SNAKE_CASE__ : List[str] =downsampling_rates
SCREAMING_SNAKE_CASE__ : List[str] =num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] =is_training
SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_labels
SCREAMING_SNAKE_CASE__ : List[str] =hidden_act
SCREAMING_SNAKE_CASE__ : List[str] =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict =initializer_range
SCREAMING_SNAKE_CASE__ : Optional[int] =num_labels
SCREAMING_SNAKE_CASE__ : Optional[int] =scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self : List[Any] , __lowercase : int , __lowercase : Tuple , __lowercase : Tuple ) -> str:
SCREAMING_SNAKE_CASE__ : List[Any] =SegformerModel(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def __magic_name__ ( self : str , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int =self.num_labels
SCREAMING_SNAKE_CASE__ : str =SegformerForSemanticSegmentation(__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Any =model(__lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(__lowercase , labels=__lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def __magic_name__ ( self : Tuple , __lowercase : Union[str, Any] , __lowercase : List[Any] , __lowercase : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[Any] =1
SCREAMING_SNAKE_CASE__ : str =SegformerForSemanticSegmentation(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple =torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =model(__lowercase , labels=__lowercase )
self.parent.assertGreater(result.loss , 0.0 )
def __magic_name__ ( self : int ) -> Any:
SCREAMING_SNAKE_CASE__ : str =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] =config_and_inputs
SCREAMING_SNAKE_CASE__ : str ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
snake_case_ = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"""feature-extraction""": SegformerModel,
"""image-classification""": SegformerForImageClassification,
"""image-segmentation""": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = False
def __magic_name__ ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =SegformerModelTester(self )
SCREAMING_SNAKE_CASE__ : Dict =SegformerConfigTester(self , config_class=__lowercase )
def __magic_name__ ( self : Dict ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __magic_name__ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def __magic_name__ ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*__lowercase )
def __magic_name__ ( self : int ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*__lowercase )
@unittest.skip('''SegFormer does not use inputs_embeds''' )
def __magic_name__ ( self : str ) -> Union[str, Any]:
pass
@unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' )
def __magic_name__ ( self : Union[str, Any] ) -> str:
pass
def __magic_name__ ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : int =model_class(__lowercase )
SCREAMING_SNAKE_CASE__ : int =inspect.signature(model.forward )
# signature.parameters is an OrderedDict, so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Any =['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Tuple =True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] =True
SCREAMING_SNAKE_CASE__ : Dict =False
SCREAMING_SNAKE_CASE__ : str =True
SCREAMING_SNAKE_CASE__ : Any =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] =outputs.attentions
SCREAMING_SNAKE_CASE__ : Union[str, Any] =sum(self.model_tester.depths )
self.assertEqual(len(__lowercase ) , __lowercase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE__ : Dict =True
SCREAMING_SNAKE_CASE__ : Any =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : int =outputs.attentions
self.assertEqual(len(__lowercase ) , __lowercase )
# verify the first attentions (first block, first layer)
SCREAMING_SNAKE_CASE__ : Any =(self.model_tester.image_size // 4) ** 2
SCREAMING_SNAKE_CASE__ : Optional[Any] =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
SCREAMING_SNAKE_CASE__ : str =(self.model_tester.image_size // 32) ** 2
SCREAMING_SNAKE_CASE__ : List[str] =(self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
SCREAMING_SNAKE_CASE__ : Any =len(__lowercase )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE__ : Optional[int] =True
SCREAMING_SNAKE_CASE__ : Optional[int] =True
SCREAMING_SNAKE_CASE__ : Optional[int] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] =model(**self._prepare_for_class(__lowercase , __lowercase ) )
self.assertEqual(out_len + 1 , len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : str =outputs.attentions
self.assertEqual(len(__lowercase ) , __lowercase )
# verify the first attentions (first block, first layer)
SCREAMING_SNAKE_CASE__ : List[Any] =(self.model_tester.image_size // 4) ** 2
SCREAMING_SNAKE_CASE__ : Optional[int] =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def __magic_name__ ( self : str ) -> int:
def check_hidden_states_output(__lowercase : List[str] , __lowercase : Union[str, Any] , __lowercase : Optional[int] ):
SCREAMING_SNAKE_CASE__ : List[str] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Any =self.model_tester.num_encoder_blocks
self.assertEqual(len(__lowercase ) , __lowercase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[str] =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : Dict =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def __magic_name__ ( self : List[str] ) -> Optional[Any]:
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[str] =True
for model_class in self.all_model_classes:
if model_class in get_values(__lowercase ):
continue
SCREAMING_SNAKE_CASE__ : Tuple =model_class(__lowercase )
model.to(__lowercase )
model.train()
SCREAMING_SNAKE_CASE__ : Dict =self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =model(**__lowercase ).loss
loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __magic_name__ ( self : str ) -> int:
pass
@slow
def __magic_name__ ( self : Optional[int] ) -> Union[str, Any]:
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : List[str] =SegformerModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def _a( ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
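# Illustrative shape check for the attention assertions in the test class above
# (not part of the original tests; the helper name is made up for this sketch):
# with image_size=64 and sr_ratios=[8, 4, 2, 1], the first block attends over
# (64 // 4) ** 2 = 256 query positions against (64 // (4 * 8)) ** 2 = 4
# sequence-reduced key positions.
def _first_block_attention_shape(image_size: int, sr_ratio: int) -> tuple:
    expected_seq_len = (image_size // 4) ** 2
    expected_reduced_seq_len = (image_size // (4 * sr_ratio)) ** 2
    return (expected_seq_len, expected_reduced_seq_len)

assert _first_block_attention_shape(64, 8) == (256, 4)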
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def __magic_name__ ( self : Tuple ) -> Union[str, Any]:
# only resize + normalize
SCREAMING_SNAKE_CASE__ : int =SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=__lowercase , align=__lowercase , do_random_crop=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =prepare_img()
SCREAMING_SNAKE_CASE__ : List[str] =image_processor(images=__lowercase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : int =encoded_inputs.pixel_values.to(__lowercase )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , __lowercase )
SCREAMING_SNAKE_CASE__ : str =torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __lowercase , atol=1e-4 ) )
@slow
def __magic_name__ ( self : Union[str, Any] ) -> int:
# only resize + normalize
SCREAMING_SNAKE_CASE__ : Optional[Any] =SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=__lowercase , align=__lowercase , do_random_crop=__lowercase )
SCREAMING_SNAKE_CASE__ : Any =SegformerForSemanticSegmentation.from_pretrained(
'''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''' ).to(__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =prepare_img()
SCREAMING_SNAKE_CASE__ : List[Any] =image_processor(images=__lowercase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : int =encoded_inputs.pixel_values.to(__lowercase )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str =model(__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , __lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __lowercase , atol=1e-1 ) )
@slow
def __magic_name__ ( self : str ) -> Any:
# only resize + normalize
SCREAMING_SNAKE_CASE__ : Optional[int] =SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=__lowercase , align=__lowercase , do_random_crop=__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
__lowercase )
SCREAMING_SNAKE_CASE__ : str =prepare_img()
SCREAMING_SNAKE_CASE__ : List[str] =image_processor(images=__lowercase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : Any =encoded_inputs.pixel_values.to(__lowercase )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase )
SCREAMING_SNAKE_CASE__ : int =outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE__ : List[str] =image_processor.post_process_semantic_segmentation(outputs=__lowercase , target_sizes=[(5_00, 3_00)] )
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =image_processor.post_process_semantic_segmentation(outputs=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =torch.Size((1_28, 1_28) )
self.assertEqual(segmentation[0].shape , __lowercase )
| 702 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , __lowercase : Optional[int] , __lowercase : str=13 , __lowercase : int=10 , __lowercase : List[Any]=3 , __lowercase : List[str]=2 , __lowercase : int=2 , __lowercase : Dict=True , __lowercase : Optional[Any]=True , __lowercase : int=32 , __lowercase : List[Any]=5 , __lowercase : Union[str, Any]=4 , __lowercase : Any=37 , __lowercase : Optional[Any]="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : Tuple=0.1 , __lowercase : Dict=10 , __lowercase : int=0.02 , __lowercase : str="divided_space_time" , __lowercase : Union[str, Any]=None , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : int =parent
SCREAMING_SNAKE_CASE__ : List[str] =batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_size
SCREAMING_SNAKE_CASE__ : List[Any] =num_channels
SCREAMING_SNAKE_CASE__ : int =patch_size
SCREAMING_SNAKE_CASE__ : Tuple =num_frames
SCREAMING_SNAKE_CASE__ : List[Any] =is_training
SCREAMING_SNAKE_CASE__ : List[str] =use_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size
SCREAMING_SNAKE_CASE__ : str =num_hidden_layers
SCREAMING_SNAKE_CASE__ : int =num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] =hidden_act
SCREAMING_SNAKE_CASE__ : Dict =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] =attention_type
SCREAMING_SNAKE_CASE__ : Union[str, Any] =initializer_range
SCREAMING_SNAKE_CASE__ : Any =scope
SCREAMING_SNAKE_CASE__ : int =num_labels
# in TimeSformer, the sequence length equals num_frames * num_patches per frame + 1 (the CLS token)
SCREAMING_SNAKE_CASE__ : List[str] =(image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : str =(num_frames) * self.num_patches_per_frame + 1
def __magic_name__ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[Any] =None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : int =self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : int ) -> Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
SCREAMING_SNAKE_CASE__ : List[Any] =self.num_labels
return config
def __magic_name__ ( self : int , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[Any] ) -> int:
SCREAMING_SNAKE_CASE__ : Tuple =TimesformerModel(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str =TimesformerForVideoClassification(__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase )
# verify the logits shape
SCREAMING_SNAKE_CASE__ : Tuple =torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __lowercase )
def __magic_name__ ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =config_and_inputs
SCREAMING_SNAKE_CASE__ : Any ={'''pixel_values''': pixel_values}
return config, inputs_dict
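# Illustrative sanity check of the sequence-length arithmetic used above (the
# helper is hypothetical, not part of the original tester): with the defaults
# image_size=10, patch_size=2, num_frames=2, each frame contributes
# (10 // 2) ** 2 = 25 patch tokens, so seq_length = 2 * 25 + 1 = 51 with CLS.
def _timesformer_seq_length(image_size: int, patch_size: int, num_frames: int) -> int:
    num_patches_per_frame = (image_size // patch_size) ** 2
    return num_frames * num_patches_per_frame + 1

assert _timesformer_seq_length(10, 2, 2) == 51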
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
snake_case_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
snake_case_ = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def __magic_name__ ( self : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Dict =TimesformerModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] =ConfigTester(
self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 )
def __magic_name__ ( self : str , __lowercase : Dict , __lowercase : str , __lowercase : Optional[int]=False ) -> int:
SCREAMING_SNAKE_CASE__ : str =copy.deepcopy(__lowercase )
if return_labels:
if model_class in get_values(__lowercase ):
SCREAMING_SNAKE_CASE__ : Any =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
return inputs_dict
def __magic_name__ ( self : List[Any] ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='''TimeSformer does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ) -> Optional[int]:
pass
def __magic_name__ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str =model_class(__lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Any =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) )
def __magic_name__ ( self : Any ) -> Any:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase )
SCREAMING_SNAKE_CASE__ : str =inspect.signature(model.forward )
# signature.parameters is an OrderedDict, so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : List[str] =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Dict =['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def __magic_name__ ( self : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__lowercase )
@slow
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> List[str]:
if not self.has_attentions:
pass
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] =True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.seq_length
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.num_frames
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
SCREAMING_SNAKE_CASE__ : str =False
SCREAMING_SNAKE_CASE__ : Tuple =True
SCREAMING_SNAKE_CASE__ : Dict =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE__ : List[Any] =True
SCREAMING_SNAKE_CASE__ : List[str] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE__ : Optional[int] =True
SCREAMING_SNAKE_CASE__ : Union[str, Any] =True
SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) )
self.assertEqual(out_len + 1 , len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : List[str] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def __magic_name__ ( self : Tuple ) -> List[Any]:
def check_hidden_states_output(__lowercase : Tuple , __lowercase : Dict , __lowercase : Optional[int] ):
SCREAMING_SNAKE_CASE__ : List[Any] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : int =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__lowercase ) , __lowercase )
SCREAMING_SNAKE_CASE__ : int =self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Tuple =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : List[str] =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''', filename='''eating_spaghetti.npy''', repo_type='''dataset''' )
SCREAMING_SNAKE_CASE__ : Any =np.load(UpperCamelCase__ )
return list(UpperCamelCase__ )
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : Any ) -> List[str]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : int =TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to(
__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =self.default_image_processor
SCREAMING_SNAKE_CASE__ : Tuple =prepare_video()
SCREAMING_SNAKE_CASE__ : Any =image_processor(video[:8] , return_tensors='''pt''' ).to(__lowercase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] =model(**__lowercase )
# verify the logits
SCREAMING_SNAKE_CASE__ : List[str] =torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , __lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 ) )
| 665 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
a_ = logging.get_logger(__name__)
a_ = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase_ ):
snake_case_ = """deberta-v2"""
def __init__( self : Dict , __lowercase : Dict=12_81_00 , __lowercase : Any=15_36 , __lowercase : Union[str, Any]=24 , __lowercase : Optional[int]=24 , __lowercase : Any=61_44 , __lowercase : List[str]="gelu" , __lowercase : List[str]=0.1 , __lowercase : int=0.1 , __lowercase : Dict=5_12 , __lowercase : Optional[int]=0 , __lowercase : Union[str, Any]=0.02 , __lowercase : str=1e-7 , __lowercase : Union[str, Any]=False , __lowercase : str=-1 , __lowercase : str=0 , __lowercase : Optional[Any]=True , __lowercase : Union[str, Any]=None , __lowercase : Any=0 , __lowercase : Any="gelu" , **__lowercase : Optional[int] , ) -> Optional[Any]:
super().__init__(**__A )
SCREAMING_SNAKE_CASE__ : Optional[int] =hidden_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] =num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =hidden_act
SCREAMING_SNAKE_CASE__ : Any =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple =max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[Any] =type_vocab_size
SCREAMING_SNAKE_CASE__ : int =initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] =relative_attention
SCREAMING_SNAKE_CASE__ : Dict =max_relative_positions
SCREAMING_SNAKE_CASE__ : Any =pad_token_id
SCREAMING_SNAKE_CASE__ : Any =position_biased_input
# Backwards compatibility
if type(__A ) == str:
SCREAMING_SNAKE_CASE__ : List[Any] =[x.strip() for x in pos_att_type.lower().split('''|''' )]
SCREAMING_SNAKE_CASE__ : List[str] =pos_att_type
SCREAMING_SNAKE_CASE__ : Tuple =vocab_size
SCREAMING_SNAKE_CASE__ : List[str] =layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[str] =kwargs.get('''pooler_hidden_size''' , __A )
SCREAMING_SNAKE_CASE__ : Tuple =pooler_dropout
SCREAMING_SNAKE_CASE__ : str =pooler_hidden_act
class __SCREAMING_SNAKE_CASE ( UpperCamelCase_ ):
@property
def __magic_name__ ( self : str ) -> Tuple:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : int ={0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE__ : List[Any] ={0: "batch", 1: "sequence"}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def __magic_name__ ( self : Dict ) -> Dict:
return 12
def __magic_name__ ( self : List[Any] , __lowercase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __lowercase : int = -1 , __lowercase : int = -1 , __lowercase : int = -1 , __lowercase : bool = False , __lowercase : Optional["TensorType"] = None , __lowercase : int = 3 , __lowercase : int = 40 , __lowercase : int = 40 , __lowercase : "PreTrainedTokenizerBase" = None , ) -> Tuple:
SCREAMING_SNAKE_CASE__ : int =super().generate_dummy_inputs(preprocessor=__A , framework=__A )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
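# Illustrative summary of the export spec above (not part of the original
# class): for the default task each input exposes axis 0 as "batch" and axis 1
# as "sequence"; "token_type_ids" is only included when type_vocab_size > 0,
# and the dummy-input generator deletes it again for configs without segment
# embeddings.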
| 703 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
a_ = {
'bert-base-uncased': 5_1_2,
'bert-large-uncased': 5_1_2,
'bert-base-cased': 5_1_2,
'bert-large-cased': 5_1_2,
'bert-base-multilingual-uncased': 5_1_2,
'bert-base-multilingual-cased': 5_1_2,
'bert-base-chinese': 5_1_2,
'bert-base-german-cased': 5_1_2,
'bert-large-uncased-whole-word-masking': 5_1_2,
'bert-large-cased-whole-word-masking': 5_1_2,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-base-cased-finetuned-mrpc': 5_1_2,
'bert-base-german-dbmdz-cased': 5_1_2,
'bert-base-german-dbmdz-uncased': 5_1_2,
'TurkuNLP/bert-base-finnish-cased-v1': 5_1_2,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_1_2,
'wietsedv/bert-base-dutch-cased': 5_1_2,
}
a_ = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_INIT_CONFIGURATION
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = BertTokenizer
def __init__( self : int , __lowercase : Union[str, Any]=None , __lowercase : Tuple=None , __lowercase : str=True , __lowercase : Optional[Any]="[UNK]" , __lowercase : Tuple="[SEP]" , __lowercase : Any="[PAD]" , __lowercase : List[Any]="[CLS]" , __lowercase : Union[str, Any]="[MASK]" , __lowercase : Tuple=True , __lowercase : str=None , **__lowercase : Any , ) -> Optional[Any]:
super().__init__(
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , tokenize_chinese_chars=__lowercase , strip_accents=__lowercase , **__lowercase , )
SCREAMING_SNAKE_CASE__ : str =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , __lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , __lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , __lowercase ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE__ : int =getattr(__lowercase , normalizer_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE__ : Any =do_lower_case
SCREAMING_SNAKE_CASE__ : Any =strip_accents
SCREAMING_SNAKE_CASE__ : Dict =tokenize_chinese_chars
SCREAMING_SNAKE_CASE__ : Union[str, Any] =normalizer_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =do_lower_case
def __magic_name__ ( self : int , __lowercase : Optional[Any] , __lowercase : Union[str, Any]=None ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __magic_name__ ( self : Union[str, Any] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ : List[str] =[self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__ ( self : List[Any] , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self._tokenizer.model.save(__lowercase , name=__lowercase )
return tuple(__lowercase )
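# Minimal sketch of the segment-id layout produced by the method above (the
# helper and the id values 101/102 for [CLS]/[SEP] are assumptions for this
# example, not read from the class): a single sequence maps entirely to
# segment 0, while a pair maps the second sentence plus its trailing [SEP]
# to segment 1.
def _token_type_ids(ids_a, ids_b=None):
    cls_, sep = [101], [102]
    if ids_b is None:
        return len(cls_ + ids_a + sep) * [0]
    return len(cls_ + ids_a + sep) * [0] + len(ids_b + sep) * [1]

assert _token_type_ids([7, 8]) == [0, 0, 0, 0]
assert _token_type_ids([7, 8], [9]) == [0, 0, 0, 0, 1, 1]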
| 665 | 0 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
a_ = 1_0_0
a_ = set(range(3, NUM_PRIMES, 2))
primes.add(2)
a_ = 42
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
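# Quick self-contained check of the sieve above (illustrative only; the helper
# name is made up): the same odd-only seeding plus multiple removal recovers
# the primes below 30.
def _small_sieve(limit: int) -> set:
    found = set(range(3, limit, 2))
    found.add(2)
    for p in range(3, int(limit**0.5) + 1, 2):
        if p in found:
            found.difference_update(set(range(p * p, limit, p)))
    return found

assert sorted(_small_sieve(30)) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]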
@lru_cache(maxsize=1_0_0 )
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
SCREAMING_SNAKE_CASE__ : set[int] =set()
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def _a( UpperCamelCase__ : int = 5_0_0_0 ):
'''simple docstring'''
for number_to_partition in range(1, _lowerCamelCase ):
if len(partition(_lowerCamelCase ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(F'''{solution() = }''')
| 704 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
a_ = False
a_ = False
def _a( UpperCamelCase__ : Namespace ):
'''simple docstring'''
return TrainCommand(UpperCamelCase__ )
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
@staticmethod
def __magic_name__ ( __lowercase : ArgumentParser ) -> Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' )
train_parser.add_argument(
'''--train_data''' , type=__lowercase , required=__lowercase , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=__lowercase , default=0 , help='''Column of the dataset csv file with example labels.''' )
train_parser.add_argument(
'''--column_text''' , type=__lowercase , default=1 , help='''Column of the dataset csv file with example texts.''' )
train_parser.add_argument(
'''--column_id''' , type=__lowercase , default=2 , help='''Column of the dataset csv file with example ids.''' )
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' )
train_parser.add_argument('''--validation_data''' , type=__lowercase , default='''''' , help='''path to validation dataset.''' )
train_parser.add_argument(
'''--validation_split''' , type=__lowercase , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
train_parser.add_argument('''--output''' , type=__lowercase , default='''./''' , help='''path to saved the trained model.''' )
train_parser.add_argument(
'''--task''' , type=__lowercase , default='''text_classification''' , help='''Task to train the model on.''' )
train_parser.add_argument(
'''--model''' , type=__lowercase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' )
train_parser.add_argument('''--train_batch_size''' , type=__lowercase , default=32 , help='''Batch size for training.''' )
train_parser.add_argument('''--valid_batch_size''' , type=__lowercase , default=64 , help='''Batch size for validation.''' )
train_parser.add_argument('''--learning_rate''' , type=__lowercase , default=3e-5 , help='''Learning rate.''' )
train_parser.add_argument('''--adam_epsilon''' , type=__lowercase , default=1e-08 , help='''Epsilon for Adam optimizer.''' )
train_parser.set_defaults(func=__lowercase )
def __init__( self : Tuple , __lowercase : Namespace ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Tuple =logging.get_logger('''transformers-cli/training''' )
SCREAMING_SNAKE_CASE__ : int ='''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output , exist_ok=__lowercase )
SCREAMING_SNAKE_CASE__ : Any =args.output
SCREAMING_SNAKE_CASE__ : str =args.column_label
SCREAMING_SNAKE_CASE__ : List[Any] =args.column_text
SCREAMING_SNAKE_CASE__ : Tuple =args.column_id
self.logger.info(F"Loading {args.task} pipeline for {args.model}" )
if args.task == "text_classification":
SCREAMING_SNAKE_CASE__ : List[str] =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"Loading dataset from {args.train_data}" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =None
if args.validation_data:
self.logger.info(F"Loading validation dataset from {args.validation_data}" )
SCREAMING_SNAKE_CASE__ : List[Any] =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =args.validation_split
SCREAMING_SNAKE_CASE__ : List[Any] =args.train_batch_size
SCREAMING_SNAKE_CASE__ : Any =args.valid_batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =args.learning_rate
SCREAMING_SNAKE_CASE__ : int =args.adam_epsilon
def __magic_name__ ( self : Any ) -> str:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __magic_name__ ( self : Optional[int] ) -> Tuple:
raise NotImplementedError
def __magic_name__ ( self : Dict ) -> List[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
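# Example invocation of the subcommand registered above (illustrative; the file
# paths are placeholders and the CSV layout follows the --column_* defaults:
# label, text, id):
#
#   transformers-cli train \
#       --train_data ./data/train.csv \
#       --model bert-base-uncased \
#       --output ./trained_model \
#       --train_batch_size 32 \
#       --learning_rate 3e-5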
| 665 | 0 |
'''simple docstring'''
def _a( UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple =0
SCREAMING_SNAKE_CASE__ : Union[str, Any] =len(UpperCamelCase__ )
for i in range(n - 1 ):
for j in range(i + 1, UpperCamelCase__ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def _a( UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
if len(UpperCamelCase__ ) <= 1:
return arr, 0
SCREAMING_SNAKE_CASE__ : Optional[Any] =len(UpperCamelCase__ ) // 2
SCREAMING_SNAKE_CASE__ : List[Any] =arr[0:mid]
SCREAMING_SNAKE_CASE__ : Tuple =arr[mid:]
SCREAMING_SNAKE_CASE__ : str =count_inversions_recursive(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : str =count_inversions_recursive(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any =_count_cross_inversions(UpperCamelCase__, UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[str] =inversion_p + inversions_q + cross_inversions
return c, num_inversions
def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =[]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =0
while i < len(UpperCamelCase__ ) and j < len(UpperCamelCase__ ):
if p[i] > q[j]:
# if p[i] > q[j], then p[k] > q[j] for all i <= k < len(p).
# These are all cross inversions; the claim follows from the
# property that p is sorted.
num_inversion += len(UpperCamelCase__ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(UpperCamelCase__ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
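# Worked example of the sortedness claim above (a self-contained sketch, not
# wired into the merge helper): for p = [3, 5] and q = [1, 2, 4], the cross
# inversions are (3,1), (5,1), (3,2), (5,2) and (5,4), five in total, which a
# brute-force pair count confirms.
def _cross_inversions_demo():
    p, q = [3, 5], [1, 2, 4]
    return sum(1 for x in p for y in q if x > y)

assert _cross_inversions_demo() == 5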
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str =[1_0, 2, 1, 5, 5, 2, 1_1]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
SCREAMING_SNAKE_CASE__ : Tuple =count_inversions_bf(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =count_inversions_recursive(UpperCamelCase__ )
assert num_inversions_bf == num_inversions_recursive == 8
print('''number of inversions = ''', UpperCamelCase__ )
# testing an array with zero inversions (a sorted array)
arr_a.sort()
SCREAMING_SNAKE_CASE__ : str =count_inversions_bf(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : str =count_inversions_recursive(UpperCamelCase__ )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''', UpperCamelCase__ )
# an empty list should also have zero inversions
SCREAMING_SNAKE_CASE__ : Any =[]
SCREAMING_SNAKE_CASE__ : List[str] =count_inversions_bf(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple =count_inversions_recursive(UpperCamelCase__ )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''', UpperCamelCase__ )
if __name__ == "__main__":
main()
| 705 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = KandinskyVaaImgaImgPipeline
snake_case_ = ["""image_embeds""", """negative_image_embeds""", """image"""]
snake_case_ = [
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
snake_case_ = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
snake_case_ = False
@property
def __magic_name__ ( self : List[str] ) -> Tuple:
return 32
@property
def __magic_name__ ( self : List[str] ) -> str:
return 32
@property
def __magic_name__ ( self : Any ) -> Optional[int]:
return self.time_input_dim
@property
def __magic_name__ ( self : List[Any] ) -> int:
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Tuple ) -> Optional[int]:
return 1_00
@property
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''in_channels''': 4,
# Out channels is double the in channels because the model predicts a mean and a variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE__ : Optional[int] =UNetaDConditionModel(**__lowercase )
return model
@property
def __magic_name__ ( self : Dict ) -> Any:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __magic_name__ ( self : Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] =VQModel(**self.dummy_movq_kwargs )
return model
def __magic_name__ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[str] =self.dummy_unet
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_movq
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''num_train_timesteps''': 10_00,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE__ : str =DDIMScheduler(**__lowercase )
SCREAMING_SNAKE_CASE__ : Any ={
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __magic_name__ ( self : str , __lowercase : Optional[Any] , __lowercase : Any=0 ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowercase )
# create init_image
SCREAMING_SNAKE_CASE__ : Optional[Any] =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ : Any =Image.fromarray(np.uinta(__lowercase ) ).convert('''RGB''' ).resize((2_56, 2_56) )
if str(__lowercase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ : Dict =torch.manual_seed(__lowercase )
else:
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
SCREAMING_SNAKE_CASE__ : str ={
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __magic_name__ ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] ='''cpu'''
SCREAMING_SNAKE_CASE__ : Tuple =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =output.images
SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipe(
**self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : List[str] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : Tuple =np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : int ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : str =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE__ : List[Any] ='''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE__ : Optional[int] =KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =KandinskyVaaImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Dict =pipeline.to(__lowercase )
pipeline.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =pipe_prior(
__lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ : int =output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__lowercase , __lowercase )
| 665 | 0 |
'''simple docstring'''
import numpy as np
def _a( UpperCamelCase__ : np.array ):
'''simple docstring'''
return (2 / (1 + np.exp(-2 * vector ))) - 1
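# Equivalence check (illustrative): the closed form above is the standard
# identity tanh(x) = 2 / (1 + exp(-2x)) - 1, so it matches numpy's built-in tanh.
_sample = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
assert np.allclose((2 / (1 + np.exp(-2 * _sample))) - 1, np.tanh(_sample))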
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
a_ = TypeVar('T')
class __SCREAMING_SNAKE_CASE ( Generic[T] ):
snake_case_ = 42 # Cache store of keys
snake_case_ = 42 # References of the keys in cache
snake_case_ = 10 # Maximum capacity of cache
def __init__( self : Dict , __lowercase : int ) -> None:
SCREAMING_SNAKE_CASE__ : Any =deque()
SCREAMING_SNAKE_CASE__ : str =set()
if not n:
SCREAMING_SNAKE_CASE__ : Optional[Any] =sys.maxsize
elif n < 0:
raise ValueError('''n should be an integer greater than 0.''' )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =n
def __magic_name__ ( self : List[str] , __lowercase : T ) -> None:
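        # LRU policy: a repeated key is moved to the front of the deque; a new key
        # evicts the tail (least recently used) once _MAX_CAPACITY is reached.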
        if __lowercase not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
SCREAMING_SNAKE_CASE__ : int =self.dq_store.pop()
self.key_reference.remove(__lowercase )
else:
self.dq_store.remove(__lowercase )
self.dq_store.appendleft(__lowercase )
self.key_reference.add(__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> None:
for k in self.dq_store:
print(__lowercase )
def __repr__( self : List[Any] ) -> str:
return F"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 665 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __SCREAMING_SNAKE_CASE :
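    # Circular FIFO queue over a fixed ring of linked nodes: instead of allocating
    # and freeing nodes, `front` and `rear` chase each other around the ring.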
def __init__( self : List[Any] , __lowercase : str = 6 ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple =None
SCREAMING_SNAKE_CASE__ : Any =None
        self.create_linked_list(__lowercase )
def __magic_name__ ( self : List[Any] , __lowercase : Optional[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Any =Node()
SCREAMING_SNAKE_CASE__ : Any =current_node
SCREAMING_SNAKE_CASE__ : Optional[int] =current_node
SCREAMING_SNAKE_CASE__ : Tuple =current_node
        for _ in range(1 , __lowercase ):
SCREAMING_SNAKE_CASE__ : Any =Node()
SCREAMING_SNAKE_CASE__ : Optional[Any] =current_node
SCREAMING_SNAKE_CASE__ : Optional[Any] =previous_node
SCREAMING_SNAKE_CASE__ : Optional[Any] =current_node
SCREAMING_SNAKE_CASE__ : str =self.front
SCREAMING_SNAKE_CASE__ : Dict =previous_node
def __magic_name__ ( self : List[str] ) -> Dict:
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def __magic_name__ ( self : Tuple ) -> Optional[Any]:
self.check_can_perform_operation()
return self.front.data if self.front else None
def __magic_name__ ( self : Dict , __lowercase : Any ) -> str:
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.rear.next
if self.rear:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =data
def __magic_name__ ( self : Tuple ) -> List[str]:
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
SCREAMING_SNAKE_CASE__ : str =self.front.data
SCREAMING_SNAKE_CASE__ : Optional[int] =None
return data
SCREAMING_SNAKE_CASE__ : Any =self.front
SCREAMING_SNAKE_CASE__ : Any =old_front.next
SCREAMING_SNAKE_CASE__ : int =old_front.data
SCREAMING_SNAKE_CASE__ : Any =None
return data
def __magic_name__ ( self : str ) -> str:
if self.is_empty():
raise Exception('''Empty Queue''' )
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
if self.rear and self.rear.next == self.front:
raise Exception('''Full Queue''' )
class __SCREAMING_SNAKE_CASE :
def __init__( self : int ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : str =None
SCREAMING_SNAKE_CASE__ : Dict =None
SCREAMING_SNAKE_CASE__ : Tuple =None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
a_ = list[list[float | int]]
def _a( UpperCamelCase__ : Matrix, UpperCamelCase__ : Matrix ):
'''simple docstring'''
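    # Gaussian elimination with partial pivoting on the augmented matrix [A | b]:
    # forward elimination zeroes the lower triangle, then back substitution
    # recovers x such that A @ x = b (rounded to absorb floating-point noise).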
SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(size + 1 )] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : float
for row in range(UpperCamelCase__ ):
for col in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Tuple =matrix[row][col]
SCREAMING_SNAKE_CASE__ : Optional[int] =vector[row][0]
SCREAMING_SNAKE_CASE__ : Any =0
SCREAMING_SNAKE_CASE__ : Union[str, Any] =0
while row < size and col < size:
# pivoting
SCREAMING_SNAKE_CASE__ : Any =max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCamelCase__, UpperCamelCase__ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[pivot_row], augmented[row]
for rowa in range(row + 1, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] =augmented[rowa][col] / augmented[row][col]
SCREAMING_SNAKE_CASE__ : Tuple =0
for cola in range(col + 1, size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1, UpperCamelCase__ ):
for row in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[row][col] / augmented[col][col]
for cola in range(UpperCamelCase__, size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row], 1_0 )] for row in range(UpperCamelCase__ )
]
def _a( UpperCamelCase__ : list[int] ):
'''simple docstring'''
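    # Builds a Vandermonde system: row i is [(i + 1)**(n - 1), ..., (i + 1), 1], so
    # solving it yields the coefficients of the unique polynomial through the points.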
SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(UpperCamelCase__ )] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Matrix =[[0] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Matrix
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
for x_val, y_val in enumerate(UpperCamelCase__ ):
for col in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] =(x_val + 1) ** (size - col - 1)
SCREAMING_SNAKE_CASE__ : Dict =y_val
SCREAMING_SNAKE_CASE__ : Optional[int] =solve(UpperCamelCase__, UpperCamelCase__ )
def interpolated_func(UpperCamelCase__ : int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(UpperCamelCase__ ) )
return interpolated_func
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**1_0
)
def _a( UpperCamelCase__ : Callable[[int], int] = question_function, UpperCamelCase__ : int = 1_0 ):
'''simple docstring'''
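    # Project Euler 101-style sum: for each truncation of the data, fit a polynomial
    # and add its first incorrect term (the smallest x where the fit diverges from func).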
SCREAMING_SNAKE_CASE__ : list[int] =[func(UpperCamelCase__ ) for x_val in range(1, order + 1 )]
SCREAMING_SNAKE_CASE__ : list[Callable[[int], int]] =[
interpolate(data_points[:max_coeff] ) for max_coeff in range(1, order + 1 )
]
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : Callable[[int], int]
SCREAMING_SNAKE_CASE__ : int
for poly in polynomials:
SCREAMING_SNAKE_CASE__ : Any =1
while func(UpperCamelCase__ ) == poly(UpperCamelCase__ ):
x_val += 1
ret += poly(UpperCamelCase__ )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 665 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def _a( UpperCamelCase__ : int, UpperCamelCase__ : Optional[int]=False, UpperCamelCase__ : Any=False, UpperCamelCase__ : Any=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def _a( UpperCamelCase__ : Tuple, UpperCamelCase__ : Any ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] ="""vilt."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : Tuple =state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight" )
SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : List[Any] =in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE__ : List[str] =in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE__ : List[str] =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE__ : List[Any] =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE__ : Any =in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE__ : Dict =in_proj_bias[-config.hidden_size :]
def _a( UpperCamelCase__ : List[str] ):
'''simple docstring'''
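    # Drop the checkpoint's stand-alone classification head ("head.*" keys from the
    # timm-style ViT backbone); the converted HF ViLT models define their own heads.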
SCREAMING_SNAKE_CASE__ : Tuple =["""head.weight""", """head.bias"""]
for k in ignore_keys:
        state_dict.pop(UpperCamelCase__, UpperCamelCase__ )
def _a( UpperCamelCase__ : Tuple, UpperCamelCase__ : List[str], UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ : Optional[Any] =dct.pop(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =val
@torch.no_grad()
def _a( UpperCamelCase__ : str, UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ : int =ViltConfig(image_size=3_8_4, patch_size=3_2, tie_word_embeddings=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : str =False
SCREAMING_SNAKE_CASE__ : str =False
SCREAMING_SNAKE_CASE__ : str =False
SCREAMING_SNAKE_CASE__ : Optional[int] =False
if "vqa" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : int =True
SCREAMING_SNAKE_CASE__ : Optional[Any] =3_1_2_9
SCREAMING_SNAKE_CASE__ : int ="""huggingface/label-files"""
SCREAMING_SNAKE_CASE__ : Dict ="""vqa2-id2label.json"""
        SCREAMING_SNAKE_CASE__ : Dict =json.load(open(hf_hub_download(UpperCamelCase__, UpperCamelCase__, repo_type='''dataset''' ), '''r''' ) )
        SCREAMING_SNAKE_CASE__ : Optional[int] ={int(UpperCamelCase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Optional[Any] =idalabel
SCREAMING_SNAKE_CASE__ : Optional[int] ={v: k for k, v in idalabel.items()}
        SCREAMING_SNAKE_CASE__ : List[Any] =ViltForQuestionAnswering(UpperCamelCase__ )
elif "nlvr" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Any =True
SCREAMING_SNAKE_CASE__ : Tuple =2
SCREAMING_SNAKE_CASE__ : Optional[int] ={0: """False""", 1: """True"""}
SCREAMING_SNAKE_CASE__ : Union[str, Any] ={v: k for k, v in config.idalabel.items()}
SCREAMING_SNAKE_CASE__ : List[str] =3
        SCREAMING_SNAKE_CASE__ : Tuple =ViltForImagesAndTextClassification(UpperCamelCase__ )
elif "irtr" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Optional[int] =True
        SCREAMING_SNAKE_CASE__ : Optional[int] =ViltForImageAndTextRetrieval(UpperCamelCase__ )
elif "mlm_itm" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Optional[int] =True
        SCREAMING_SNAKE_CASE__ : Tuple =ViltForMaskedLM(UpperCamelCase__ )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
    SCREAMING_SNAKE_CASE__ : Tuple =torch.hub.load_state_dict_from_url(UpperCamelCase__, map_location='''cpu''' )["""state_dict"""]
    SCREAMING_SNAKE_CASE__ : int =create_rename_keys(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
    for src, dest in rename_keys:
        rename_key(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
    read_in_q_k_v(UpperCamelCase__, UpperCamelCase__ )
if mlm_model or irtr_model:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =["""itm_score.fc.weight""", """itm_score.fc.bias"""]
for k in ignore_keys:
            state_dict.pop(UpperCamelCase__, UpperCamelCase__ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
        SCREAMING_SNAKE_CASE__ : Tuple =model.load_state_dict(UpperCamelCase__, strict=UpperCamelCase__ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
        model.load_state_dict(UpperCamelCase__ )
# Define processor
SCREAMING_SNAKE_CASE__ : Optional[Any] =ViltImageProcessor(size=3_8_4 )
SCREAMING_SNAKE_CASE__ : Optional[int] =BertTokenizer.from_pretrained('''bert-base-uncased''' )
    SCREAMING_SNAKE_CASE__ : Any =ViltProcessor(UpperCamelCase__, UpperCamelCase__ )
# Forward pass on example inputs (image + text)
if nlvr_model:
        SCREAMING_SNAKE_CASE__ : int =Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''', stream=UpperCamelCase__ ).raw )
        SCREAMING_SNAKE_CASE__ : Optional[int] =Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''', stream=UpperCamelCase__ ).raw )
SCREAMING_SNAKE_CASE__ : Optional[int] =(
"""The left image contains twice the number of dogs as the right image, and at least two dogs in total are"""
""" standing."""
)
        SCREAMING_SNAKE_CASE__ : int =processor(UpperCamelCase__, UpperCamelCase__, return_tensors='''pt''' )
        SCREAMING_SNAKE_CASE__ : Dict =processor(UpperCamelCase__, UpperCamelCase__, return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : Any =model(
input_ids=encoding_a.input_ids, pixel_values=encoding_a.pixel_values, pixel_values_a=encoding_a.pixel_values, )
else:
        SCREAMING_SNAKE_CASE__ : Any =Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''', stream=UpperCamelCase__ ).raw )
if mlm_model:
SCREAMING_SNAKE_CASE__ : str ="""a bunch of [MASK] laying on a [MASK]."""
else:
SCREAMING_SNAKE_CASE__ : List[str] ="""How many cats are there?"""
        SCREAMING_SNAKE_CASE__ : Union[str, Any] =processor(UpperCamelCase__, UpperCamelCase__, return_tensors='''pt''' )
        SCREAMING_SNAKE_CASE__ : Dict =model(**UpperCamelCase__ )
# Verify outputs
if mlm_model:
SCREAMING_SNAKE_CASE__ : List[str] =torch.Size([1, 1_1, 3_0_5_2_2] )
SCREAMING_SNAKE_CASE__ : List[Any] =torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] )
assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], UpperCamelCase__, atol=1e-4 )
# verify masked token prediction equals "cats"
SCREAMING_SNAKE_CASE__ : Any =outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
SCREAMING_SNAKE_CASE__ : Any =torch.Size([1, 3_1_2_9] )
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] )
        assert torch.allclose(outputs.logits[0, :3], UpperCamelCase__, atol=1e-4 )
assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], UpperCamelCase__, atol=1e-4 )
# verify vqa prediction equals "2"
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
SCREAMING_SNAKE_CASE__ : List[Any] =torch.Size([1, 2] )
SCREAMING_SNAKE_CASE__ : List[Any] =torch.tensor([-2.8_7_2_1, 2.1_2_9_1] )
        assert torch.allclose(outputs.logits[0, :3], UpperCamelCase__, atol=1e-4 )
assert outputs.logits.shape == expected_shape
    Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(f"Saving model and processor to {pytorch_dump_folder_path}" )
    model.save_pretrained(UpperCamelCase__ )
    processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
a_ = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 708 |
'''simple docstring'''
def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =0
SCREAMING_SNAKE_CASE__ : Union[str, Any] =len(UpperCamelCase__ ) - 1
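    # The probe index assumes roughly uniformly distributed keys:
    # point = left + (item - A[left]) * (right - left) // (A[right] - A[left])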
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
SCREAMING_SNAKE_CASE__ : Union[str, Any] =left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase__ ):
return None
SCREAMING_SNAKE_CASE__ : Optional[int] =sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =left
SCREAMING_SNAKE_CASE__ : Optional[Any] =point
elif point > right:
SCREAMING_SNAKE_CASE__ : Optional[int] =right
SCREAMING_SNAKE_CASE__ : Tuple =point
else:
if item < current_item:
SCREAMING_SNAKE_CASE__ : str =point - 1
else:
SCREAMING_SNAKE_CASE__ : Tuple =point + 1
return None
def _a( UpperCamelCase__ : List[str], UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
SCREAMING_SNAKE_CASE__ : Dict =left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase__ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
elif point > right:
return interpolation_search_by_recursion(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, point - 1 )
else:
return interpolation_search_by_recursion(
UpperCamelCase__, UpperCamelCase__, point + 1, UpperCamelCase__ )
def _a( UpperCamelCase__ : Dict ):
'''simple docstring'''
if collection != sorted(UpperCamelCase__ ):
raise ValueError('''Collection must be ascending sorted''' )
return True
if __name__ == "__main__":
import sys
a_ = 0
    a_ = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('Sequence must be ascending sorted to apply interpolation search')
a_ = 6_7
a_ = interpolation_search(collection, target)
if result is not None:
print(F'''{target} found at positions: {result}''')
else:
print('Not found')
| 665 | 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class __SCREAMING_SNAKE_CASE :
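    # Synthetic linear-regression data: y = a * x + b plus N(0, 0.1) noise over
    # `length` samples; the seeded generator keeps test runs deterministic.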
def __init__( self : int , __lowercase : Optional[int]=2 , __lowercase : Any=3 , __lowercase : Union[str, Any]=64 , __lowercase : List[Any]=None ) -> Optional[int]:
        SCREAMING_SNAKE_CASE__ : Optional[Any] =np.random.default_rng(__lowercase )
SCREAMING_SNAKE_CASE__ : str =length
SCREAMING_SNAKE_CASE__ : List[str] =rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : List[str] ) -> Any:
return self.length
def __getitem__( self : Tuple , __lowercase : List[str] ) -> str:
return {"x": self.x[i], "y": self.y[i]}
class __SCREAMING_SNAKE_CASE ( torch.nn.Module ):
def __init__( self : Optional[int] , __lowercase : int=0 , __lowercase : List[Any]=0 , __lowercase : Optional[Any]=False ) -> str:
super().__init__()
SCREAMING_SNAKE_CASE__ : int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE__ : List[str] =True
def __magic_name__ ( self : Optional[Any] , __lowercase : Union[str, Any]=None ) -> List[str]:
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
SCREAMING_SNAKE_CASE__ : Tuple =False
return x * self.a[0] + self.b[0]
class __SCREAMING_SNAKE_CASE ( torch.nn.Module ):
def __init__( self : Tuple , __lowercase : List[Any]=0 , __lowercase : Any=0 , __lowercase : str=False ) -> int:
super().__init__()
        SCREAMING_SNAKE_CASE__ : Tuple =torch.nn.Parameter(torch.tensor(__lowercase ).float() )
        SCREAMING_SNAKE_CASE__ : Any =torch.nn.Parameter(torch.tensor(__lowercase ).float() )
SCREAMING_SNAKE_CASE__ : Optional[int] =True
def __magic_name__ ( self : str , __lowercase : Tuple=None ) -> Tuple:
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
SCREAMING_SNAKE_CASE__ : Optional[Any] =False
return x * self.a + self.b
def _a( UpperCamelCase__ : List[str], UpperCamelCase__ : int = 1_6 ):
'''simple docstring'''
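    # Same MRPC-style dataloader setup as the training scripts, but reading small
    # local CSV fixtures and deriving the label map from the data itself.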
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE__ : Optional[int] =AutoTokenizer.from_pretrained('''bert-base-cased''' )
SCREAMING_SNAKE_CASE__ : Optional[int] ={"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    SCREAMING_SNAKE_CASE__ : str =load_dataset('''csv''', data_files=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Dict =datasets["train"].unique('''label''' )
    SCREAMING_SNAKE_CASE__ : List[str] ={v: i for i, v in enumerate(UpperCamelCase__ )}
def tokenize_function(UpperCamelCase__ : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer(
            examples['''sentence1'''], examples['''sentence2'''], truncation=UpperCamelCase__, max_length=UpperCamelCase__, padding='''max_length''' )
if "label" in examples:
SCREAMING_SNAKE_CASE__ : Optional[Any] =[label_to_id[l] for l in examples["label"]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE__ : Any =datasets.map(
        UpperCamelCase__, batched=UpperCamelCase__, remove_columns=['''sentence1''', '''sentence2''', '''label'''], )
def collate_fn(UpperCamelCase__ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(UpperCamelCase__, padding='''max_length''', max_length=1_2_8, return_tensors='''pt''' )
        return tokenizer.pad(UpperCamelCase__, padding='''longest''', return_tensors='''pt''' )
# Instantiate dataloaders.
    SCREAMING_SNAKE_CASE__ : List[Any] =DataLoader(tokenized_datasets['''train'''], shuffle=UpperCamelCase__, collate_fn=UpperCamelCase__, batch_size=2 )
    SCREAMING_SNAKE_CASE__ : Optional[Any] =DataLoader(tokenized_datasets['''validation'''], shuffle=UpperCamelCase__, collate_fn=UpperCamelCase__, batch_size=1 )
return train_dataloader, eval_dataloader
| 709 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def __magic_name__ ( *__lowercase : int , **__lowercase : Optional[Any] ) -> Optional[Any]:
pass
@is_pipeline_test
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@require_torch
def __magic_name__ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
SCREAMING_SNAKE_CASE__ : List[str] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] )
        # The floating-point scores are so close that we run into floating-point error,
        # so the label order is not guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(__lowercase ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
SCREAMING_SNAKE_CASE__ : int =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
] , )
@require_tf
def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
SCREAMING_SNAKE_CASE__ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Tuple =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
] , )
@slow
@require_torch
def __magic_name__ ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE__ : Any =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : List[str] =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def __magic_name__ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ : str =pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE__ : Optional[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
| 665 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class __SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ):
snake_case_ = """wavlm"""
def __init__( self : Tuple , __lowercase : Optional[int]=32 , __lowercase : Dict=7_68 , __lowercase : int=12 , __lowercase : Optional[Any]=12 , __lowercase : List[str]=30_72 , __lowercase : Tuple="gelu" , __lowercase : str=0.1 , __lowercase : List[Any]=0.1 , __lowercase : str=0.1 , __lowercase : int=0.0 , __lowercase : Any=0.1 , __lowercase : Optional[Any]=0.1 , __lowercase : Dict=0.02 , __lowercase : Tuple=1e-5 , __lowercase : Optional[int]="group" , __lowercase : List[Any]="gelu" , __lowercase : List[Any]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __lowercase : List[Any]=(5, 2, 2, 2, 2, 2, 2) , __lowercase : Dict=(10, 3, 3, 3, 3, 2, 2) , __lowercase : Union[str, Any]=False , __lowercase : int=1_28 , __lowercase : Tuple=16 , __lowercase : str=3_20 , __lowercase : List[str]=8_00 , __lowercase : Dict=False , __lowercase : int=True , __lowercase : Union[str, Any]=0.05 , __lowercase : int=10 , __lowercase : Optional[Any]=2 , __lowercase : Union[str, Any]=0.0 , __lowercase : Optional[int]=10 , __lowercase : Dict=3_20 , __lowercase : Dict=2 , __lowercase : Any=0.1 , __lowercase : Union[str, Any]=1_00 , __lowercase : str=2_56 , __lowercase : int=2_56 , __lowercase : Optional[Any]=0.1 , __lowercase : Tuple="mean" , __lowercase : Dict=False , __lowercase : str=False , __lowercase : Union[str, Any]=2_56 , __lowercase : Tuple=(5_12, 5_12, 5_12, 5_12, 15_00) , __lowercase : List[str]=(5, 3, 3, 1, 1) , __lowercase : Tuple=(1, 2, 3, 1, 1) , __lowercase : Optional[int]=5_12 , __lowercase : Any=80 , __lowercase : int=0 , __lowercase : Dict=1 , __lowercase : List[str]=2 , __lowercase : Tuple=False , __lowercase : List[Any]=3 , __lowercase : Optional[int]=2 , __lowercase : Optional[Any]=3 , __lowercase : Optional[Any]=None , **__lowercase : List[str] , ) -> int:
        super().__init__(**__lowercase , pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase )
SCREAMING_SNAKE_CASE__ : Any =hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =feat_extract_norm
SCREAMING_SNAKE_CASE__ : Union[str, Any] =feat_extract_activation
        SCREAMING_SNAKE_CASE__ : Union[str, Any] =list(__lowercase )
        SCREAMING_SNAKE_CASE__ : Optional[Any] =list(__lowercase )
        SCREAMING_SNAKE_CASE__ : Optional[int] =list(__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =conv_bias
SCREAMING_SNAKE_CASE__ : List[Any] =num_buckets
SCREAMING_SNAKE_CASE__ : str =max_bucket_distance
SCREAMING_SNAKE_CASE__ : Tuple =num_conv_pos_embeddings
SCREAMING_SNAKE_CASE__ : int =num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE__ : Optional[int] =len(self.conv_dim )
SCREAMING_SNAKE_CASE__ : List[Any] =num_hidden_layers
SCREAMING_SNAKE_CASE__ : Dict =intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[int] =hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] =num_attention_heads
SCREAMING_SNAKE_CASE__ : str =hidden_dropout
SCREAMING_SNAKE_CASE__ : Tuple =attention_dropout
SCREAMING_SNAKE_CASE__ : int =activation_dropout
SCREAMING_SNAKE_CASE__ : Any =feat_proj_dropout
SCREAMING_SNAKE_CASE__ : Union[str, Any] =final_dropout
SCREAMING_SNAKE_CASE__ : Any =layerdrop
SCREAMING_SNAKE_CASE__ : List[Any] =layer_norm_eps
SCREAMING_SNAKE_CASE__ : Dict =initializer_range
SCREAMING_SNAKE_CASE__ : Dict =num_ctc_classes
SCREAMING_SNAKE_CASE__ : Any =vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =do_stable_layer_norm
SCREAMING_SNAKE_CASE__ : Optional[int] =use_weighted_layer_sum
SCREAMING_SNAKE_CASE__ : str =classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE__ : str =apply_spec_augment
SCREAMING_SNAKE_CASE__ : str =mask_time_prob
SCREAMING_SNAKE_CASE__ : int =mask_time_length
SCREAMING_SNAKE_CASE__ : int =mask_time_min_masks
SCREAMING_SNAKE_CASE__ : Union[str, Any] =mask_feature_prob
SCREAMING_SNAKE_CASE__ : Tuple =mask_feature_length
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE__ : Tuple =num_codevectors_per_group
SCREAMING_SNAKE_CASE__ : Dict =num_codevector_groups
SCREAMING_SNAKE_CASE__ : Union[str, Any] =contrastive_logits_temperature
SCREAMING_SNAKE_CASE__ : Optional[Any] =num_negatives
SCREAMING_SNAKE_CASE__ : Optional[Any] =codevector_dim
SCREAMING_SNAKE_CASE__ : Tuple =proj_codevector_dim
SCREAMING_SNAKE_CASE__ : List[Any] =diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE__ : Any =ctc_loss_reduction
SCREAMING_SNAKE_CASE__ : int =ctc_zero_infinity
# adapter
SCREAMING_SNAKE_CASE__ : Tuple =add_adapter
SCREAMING_SNAKE_CASE__ : List[str] =adapter_kernel_size
SCREAMING_SNAKE_CASE__ : int =adapter_stride
SCREAMING_SNAKE_CASE__ : List[str] =num_adapter_layers
SCREAMING_SNAKE_CASE__ : Dict =output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE__ : List[Any] =classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
        SCREAMING_SNAKE_CASE__ : Optional[int] =list(__lowercase )
        SCREAMING_SNAKE_CASE__ : Any =list(__lowercase )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] =list(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =xvector_output_dim
@property
def __magic_name__ ( self : Any ) -> Union[str, Any]:
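        # Overall downsampling of the conv feature extractor: the product of all
        # strides (the default (5, 2, 2, 2, 2, 2, 2) gives 5 * 2**6 = 320 samples/frame).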
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 710 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case_ = JukeboxTokenizer
snake_case_ = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
def __magic_name__ ( self : Optional[int] ) -> str:
import torch
SCREAMING_SNAKE_CASE__ : List[str] =JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
SCREAMING_SNAKE_CASE__ : str =tokenizer(**self.metas )['''input_ids''']
# fmt: off
SCREAMING_SNAKE_CASE__ : str =[
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def __magic_name__ ( self : Any ) -> List[str]:
import torch
SCREAMING_SNAKE_CASE__ : int =JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer(**self.metas )['''input_ids''']
# fmt: off
SCREAMING_SNAKE_CASE__ : Optional[int] =[
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 665 | 0 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : str ) -> str:
SCREAMING_SNAKE_CASE__ : Any =0
def __magic_name__ ( self : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[Any] =AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def __magic_name__ ( self : str ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Tuple =Path(UpperCAmelCase_ ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE__ : Any =Path(UpperCAmelCase_ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase_ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase_ , '''w''' ) )
SCREAMING_SNAKE_CASE__ : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def __magic_name__ ( self : Optional[int] ) -> Any:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : int =Path(UpperCAmelCase_ ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE__ : Optional[int] =Path(UpperCAmelCase_ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase_ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase_ , '''w''' ) )
SCREAMING_SNAKE_CASE__ : str =AutoImageProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def __magic_name__ ( self : List[str] ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Dict =CLIPConfig()
            # Create a dummy config file with image_processor_type
SCREAMING_SNAKE_CASE__ : List[str] =Path(UpperCAmelCase_ ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE__ : str =Path(UpperCAmelCase_ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase_ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase_ , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
SCREAMING_SNAKE_CASE__ : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase_ ).to_dict()
config_dict.pop('''image_processor_type''' )
SCREAMING_SNAKE_CASE__ : Any =CLIPImageProcessor(**UpperCAmelCase_ )
# save in new folder
model_config.save_pretrained(UpperCAmelCase_ )
config.save_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Optional[int] =AutoImageProcessor.from_pretrained(UpperCAmelCase_ )
# make sure private variable is not incorrectly saved
SCREAMING_SNAKE_CASE__ : List[str] =json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def __magic_name__ ( self : List[str] ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Optional[Any] =Path(UpperCAmelCase_ ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase_ , '''w''' ) , )
SCREAMING_SNAKE_CASE__ : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def __magic_name__ ( self : Any ) -> str:
with self.assertRaisesRegex(
UpperCAmelCase_ , '''clip-base is not a local folder and is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE__ : Tuple =AutoImageProcessor.from_pretrained('''clip-base''' )
def __magic_name__ ( self : Optional[Any] ) -> Optional[int]:
with self.assertRaisesRegex(
UpperCAmelCase_ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase_ , revision='''aaaaaa''' )
def __magic_name__ ( self : List[str] ) -> List[Any]:
with self.assertRaisesRegex(
UpperCAmelCase_ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
SCREAMING_SNAKE_CASE__ : str =AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __magic_name__ ( self : Optional[Any] ) -> Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ : List[str] =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Any =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Optional[int] =AutoImageProcessor.from_pretrained(UpperCAmelCase_ , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __magic_name__ ( self : List[Any] ) -> Tuple:
try:
AutoConfig.register('''custom''' , UpperCAmelCase_ )
AutoImageProcessor.register(UpperCAmelCase_ , UpperCAmelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase_ ):
AutoImageProcessor.register(UpperCAmelCase_ , UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Optional[int] =Path(UpperCAmelCase_ ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =Path(UpperCAmelCase_ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase_ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase_ , '''w''' ) )
SCREAMING_SNAKE_CASE__ : Any =CustomImageProcessor.from_pretrained(UpperCAmelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __magic_name__ ( self : Union[str, Any] ) -> str:
class __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
snake_case_ = True
try:
AutoConfig.register('''custom''' , UpperCAmelCase_ )
AutoImageProcessor.register(UpperCAmelCase_ , UpperCAmelCase_ )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE__ : List[Any] =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE__ : List[Any] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE__ : str =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(UpperCAmelCase_ , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 711 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """gpt_neox"""
def __init__( self : List[Any] , __lowercase : Union[str, Any]=5_04_32 , __lowercase : int=61_44 , __lowercase : Tuple=44 , __lowercase : List[str]=64 , __lowercase : str=2_45_76 , __lowercase : Dict="gelu" , __lowercase : Tuple=0.25 , __lowercase : Tuple=1_00_00 , __lowercase : Tuple=0.0 , __lowercase : str=0.0 , __lowercase : List[Any]=0.1 , __lowercase : Dict=20_48 , __lowercase : Any=0.02 , __lowercase : Dict=1e-5 , __lowercase : List[Any]=True , __lowercase : str=0 , __lowercase : Optional[Any]=2 , __lowercase : Tuple=False , __lowercase : List[Any]=True , __lowercase : Optional[Any]=None , **__lowercase : Any , ) -> Dict:
super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =max_position_embeddings
SCREAMING_SNAKE_CASE__ : Any =hidden_size
SCREAMING_SNAKE_CASE__ : str =num_hidden_layers
SCREAMING_SNAKE_CASE__ : Any =num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] =intermediate_size
SCREAMING_SNAKE_CASE__ : Dict =hidden_act
SCREAMING_SNAKE_CASE__ : str =rotary_pct
SCREAMING_SNAKE_CASE__ : Optional[Any] =rotary_emb_base
SCREAMING_SNAKE_CASE__ : List[Any] =attention_dropout
SCREAMING_SNAKE_CASE__ : List[Any] =hidden_dropout
SCREAMING_SNAKE_CASE__ : str =classifier_dropout
SCREAMING_SNAKE_CASE__ : Any =initializer_range
SCREAMING_SNAKE_CASE__ : Dict =layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any =use_cache
SCREAMING_SNAKE_CASE__ : Tuple =tie_word_embeddings
SCREAMING_SNAKE_CASE__ : Tuple =use_parallel_residual
SCREAMING_SNAKE_CASE__ : Union[str, Any] =rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
                '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
def __magic_name__ ( self : Optional[Any] ) -> Optional[Any]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __lowercase ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
F"got {self.rope_scaling}" )
SCREAMING_SNAKE_CASE__ : int =self.rope_scaling.get('''type''' , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =self.rope_scaling.get('''factor''' , __lowercase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(__lowercase , __lowercase ) or rope_scaling_factor <= 1.0:
raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 665 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    a_ = sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
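# Note (added): upstream, the last line replaces the module in sys.modules so attribute
# access is routed through the lazy proxy and heavy submodules are imported only on
# first use. A minimal self-contained sketch of that idea (the class below is
# illustrative, not the real _LazyModule):
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._name_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, item):
        if item not in self._name_to_module:
            raise AttributeError(item)
        module = importlib.import_module('.' + self._name_to_module[item], self.__name__)
        value = getattr(module, item)
        setattr(self, item, value)  # cache so the submodule import happens only once
        return value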
| 712 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    a_ = sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 0 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def _a( UpperCamelCase__ : Dict ):
'''simple docstring'''
return ConvertCommand(
args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name )
a_ = '\ntransformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
@staticmethod
def __magic_name__ ( __lowercase : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =parser.add_parser(
            '''convert''' , help='''CLI tool to convert models from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='''Model\'s type.''' )
train_parser.add_argument(
'''--tf_checkpoint''' , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='''TensorFlow checkpoint path or folder.''' )
train_parser.add_argument(
'''--pytorch_dump_output''' , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='''Path to the PyTorch saved model output.''' )
train_parser.add_argument('''--config''' , type=_SCREAMING_SNAKE_CASE , default='''''' , help='''Configuration file path or folder.''' )
train_parser.add_argument(
'''--finetuning_task_name''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
train_parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
def __init__( self : Optional[Any] , __lowercase : Dict , __lowercase : str , __lowercase : Optional[int] , __lowercase : Tuple , __lowercase : List[str] , *__lowercase : List[Any] , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Any =logging.get_logger('''transformers-cli/converting''' )
self._logger.info(F"Loading model {model_type}" )
SCREAMING_SNAKE_CASE__ : int =model_type
SCREAMING_SNAKE_CASE__ : List[Any] =tf_checkpoint
SCREAMING_SNAKE_CASE__ : Any =pytorch_dump_output
SCREAMING_SNAKE_CASE__ : Any =config
SCREAMING_SNAKE_CASE__ : List[str] =finetuning_task_name
def __magic_name__ ( self : List[str] ) -> Dict:
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_SCREAMING_SNAKE_CASE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_SCREAMING_SNAKE_CASE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_SCREAMING_SNAKE_CASE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(_SCREAMING_SNAKE_CASE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_SCREAMING_SNAKE_CASE )
if "ckpt" in self._tf_checkpoint.lower():
SCREAMING_SNAKE_CASE__ : Tuple =self._tf_checkpoint
SCREAMING_SNAKE_CASE__ : Union[str, Any] =''''''
else:
SCREAMING_SNAKE_CASE__ : List[Any] =self._tf_checkpoint
SCREAMING_SNAKE_CASE__ : Tuple =''''''
convert_transfo_xl_checkpoint_to_pytorch(
_SCREAMING_SNAKE_CASE , self._config , self._pytorch_dump_output , _SCREAMING_SNAKE_CASE )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_SCREAMING_SNAKE_CASE )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_SCREAMING_SNAKE_CASE )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
                '''--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]''' )
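# Illustrative invocation (added; the flags match the parser defined above, and all
# paths are placeholders):
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint /path/to/bert_model.ckpt \
#       --config /path/to/bert_config.json \
#       --pytorch_dump_output /path/to/pytorch_dump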
| 713 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _a( UpperCamelCase__ : NDArray[floataa], UpperCamelCase__ : NDArray[floataa], UpperCamelCase__ : list[int], UpperCamelCase__ : int, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =coefficient_matrix.shape
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =constant_matrix.shape
if rowsa != colsa:
SCREAMING_SNAKE_CASE__ : Any =f"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"
raise ValueError(UpperCamelCase__ )
if colsa != 1:
SCREAMING_SNAKE_CASE__ : str =f"Constant matrix must be nx1 but received {rowsa}x{colsa}"
raise ValueError(UpperCamelCase__ )
if rowsa != rowsa:
SCREAMING_SNAKE_CASE__ : str =(
'''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
f"received {rowsa}x{colsa} and {rowsa}x{colsa}"
)
raise ValueError(UpperCamelCase__ )
if len(UpperCamelCase__ ) != rowsa:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(
'''Number of initial values must be equal to number of rows in coefficient '''
f"matrix but received {len(UpperCamelCase__ )} and {rowsa}"
)
raise ValueError(UpperCamelCase__ )
if iterations <= 0:
raise ValueError('''Iterations must be at least 1''' )
SCREAMING_SNAKE_CASE__ : NDArray[floataa] =np.concatenate(
(coefficient_matrix, constant_matrix), axis=1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =table.shape
strictly_diagonally_dominant(UpperCamelCase__ )
# Iterates the whole matrix for given number of times
for _ in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : List[str] =[]
for row in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =0
for col in range(UpperCamelCase__ ):
if col == row:
SCREAMING_SNAKE_CASE__ : int =table[row][col]
elif col == cols - 1:
SCREAMING_SNAKE_CASE__ : Any =table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
SCREAMING_SNAKE_CASE__ : int =(temp + val) / denom
new_val.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =new_val
return [float(UpperCamelCase__ ) for i in new_val]
def _a( UpperCamelCase__ : NDArray[floataa] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =table.shape
SCREAMING_SNAKE_CASE__ : Any =True
for i in range(0, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : int =0
for j in range(0, cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
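# Self-contained illustration (added; all names below are assumptions, not from the
# original file): the update rule the solver above applies is
#   x_i <- (b_i - sum_{j != i} a_ij * x_j) / a_ii
# shown here on a small strictly diagonally dominant system.
def _jacobi_demo():
    coefficients = np.array([[4.0, 1.0], [1.0, 3.0]])
    constants = np.array([[1.0], [2.0]])
    x = np.zeros(2)
    for _ in range(25):
        x = np.array(
            [
                (constants[i, 0] - sum(coefficients[i, j] * x[j] for j in range(2) if j != i))
                / coefficients[i, i]
                for i in range(2)
            ]
        )
    return x  # approaches np.linalg.solve(coefficients, constants).ravel()

if __name__ == "__main__":
    print(_jacobi_demo())  # ~ [0.0909, 0.6364], i.e. [1/11, 7/11]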
| 665 | 0 |
'''simple docstring'''
import os
from pathlib import Path
def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple ={
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
SCREAMING_SNAKE_CASE__ : int ={
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
SCREAMING_SNAKE_CASE__ : Union[str, Any] =f"{src_lang}-{tgt_lang}"
SCREAMING_SNAKE_CASE__ : int =f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(UpperCamelCase__, exist_ok=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Dict =os.path.join(UpperCamelCase__, '''README.md''' )
print(f"Generating {path}" )
with open(UpperCamelCase__, '''w''', encoding='''utf-8''' ) as f:
f.write(UpperCamelCase__ )
# make sure we are under the root of the project
a_ = Path(__file__).resolve().parent.parent.parent
a_ = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a_ , a_ , a_ = model_name.split('-')
a_ = model_cards_dir / 'facebook' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
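# Illustrative note (added): running this script writes one card per direction listed
# above, e.g. <repo_root>/model_cards/facebook/wmt19-ru-en/README.md, filled from the
# template string.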
| 714 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _a( UpperCamelCase__ : str, UpperCamelCase__ : Tuple, UpperCamelCase__ : Any, UpperCamelCase__ : List[str], UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
with open(UpperCamelCase__ ) as metadata_file:
        SCREAMING_SNAKE_CASE__ : Optional[int] =json.load(metadata_file )
SCREAMING_SNAKE_CASE__ : Optional[Any] =LukeConfig(use_entity_aware_attention=UpperCamelCase__, **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.load(UpperCamelCase__, map_location='''cpu''' )['''module''']
# Load the entity vocab file
SCREAMING_SNAKE_CASE__ : List[str] =load_original_entity_vocab(UpperCamelCase__ )
# add an entry for [MASK2]
SCREAMING_SNAKE_CASE__ : Optional[int] =max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
SCREAMING_SNAKE_CASE__ : Optional[int] =XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE__ : List[Any] =AddedToken('''<ent>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any =AddedToken('''<ent2>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''r''' ) as f:
        SCREAMING_SNAKE_CASE__ : Optional[Any] =json.load(f )
SCREAMING_SNAKE_CASE__ : int ='''MLukeTokenizer'''
with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''w''' ) as f:
        json.dump(UpperCamelCase__, f )
with open(os.path.join(UpperCamelCase__, MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ), '''w''' ) as f:
        json.dump(UpperCamelCase__, f )
SCREAMING_SNAKE_CASE__ : Optional[Any] =MLukeTokenizer.from_pretrained(UpperCamelCase__ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE__ : str =tokenizer.convert_tokens_to_ids(['''@'''] )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.convert_tokens_to_ids(['''#'''] )[0]
SCREAMING_SNAKE_CASE__ : Dict =state_dict['''embeddings.word_embeddings.weight''']
SCREAMING_SNAKE_CASE__ : List[str] =word_emb[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Tuple =word_emb[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : str =torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[bias_name]
SCREAMING_SNAKE_CASE__ : List[Any] =decoder_bias[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : str =decoder_bias[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : List[str] =torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE__ : Tuple =f"encoder.layer.{layer_index}.attention.self."
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : List[Any] =state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE__ : Any =state_dict['''entity_embeddings.entity_embeddings.weight''']
SCREAMING_SNAKE_CASE__ : Any =entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict['''entity_predictions.bias''']
SCREAMING_SNAKE_CASE__ : Tuple =entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_prediction_bias, entity_mask_bias] )
SCREAMING_SNAKE_CASE__ : int =LukeForMaskedLM(config=UpperCamelCase__ ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
SCREAMING_SNAKE_CASE__ : Tuple =OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[key]
else:
SCREAMING_SNAKE_CASE__ : Any =state_dict[key]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =model.load_state_dict(UpperCamelCase__, strict=UpperCamelCase__ )
if set(UpperCamelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" )
if set(UpperCamelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
SCREAMING_SNAKE_CASE__ : Any =MLukeTokenizer.from_pretrained(UpperCamelCase__, task='''entity_classification''' )
SCREAMING_SNAKE_CASE__ : str ='''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(0, 9)
SCREAMING_SNAKE_CASE__ : str =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : List[Any] =model(**UpperCamelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE__ : str =torch.Size((1, 3_3, 7_6_8) )
SCREAMING_SNAKE_CASE__ : int =torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE__ : Any =torch.Size((1, 1, 7_6_8) )
SCREAMING_SNAKE_CASE__ : int =torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
SCREAMING_SNAKE_CASE__ : str =MLukeTokenizer.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''Tokyo is the capital of <mask>.'''
SCREAMING_SNAKE_CASE__ : Dict =(2_4, 3_0)
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : List[str] =model(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] =encoding['''input_ids'''][0].tolist()
SCREAMING_SNAKE_CASE__ : Any =input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.entity_logits[0][0].argmax().item()
SCREAMING_SNAKE_CASE__ : Dict =[
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) )
model.save_pretrained(UpperCamelCase__ )
def _a( UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] =['''[MASK]''', '''[PAD]''', '''[UNK]''']
    SCREAMING_SNAKE_CASE__ : List[str] =[json.loads(line ) for line in open(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Optional[int] ={}
for entry in data:
SCREAMING_SNAKE_CASE__ : Tuple =entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
SCREAMING_SNAKE_CASE__ : str =entity_id
break
SCREAMING_SNAKE_CASE__ : Union[str, Any] =f"{language}:{entity_name}"
SCREAMING_SNAKE_CASE__ : Union[str, Any] =entity_id
return new_mapping
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
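# Illustrative invocation (added; the script filename and every path are placeholders,
# while the flags match the parser above):
#
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path ./pytorch_model.bin \
#       --metadata_path ./metadata.json \
#       --entity_vocab_path ./entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./converted \
#       --model_size base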
| 665 | 0 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[str] , __lowercase : Union[str, Any] , __lowercase : int=13 , __lowercase : List[Any]=7 , __lowercase : str=True , __lowercase : List[Any]=True , __lowercase : Union[str, Any]=False , __lowercase : Optional[int]=True , __lowercase : Dict=99 , __lowercase : List[Any]=64 , __lowercase : Optional[int]=5 , __lowercase : Union[str, Any]=4 , __lowercase : List[Any]=64 , __lowercase : Union[str, Any]="gelu" , __lowercase : Any=0.1 , __lowercase : Optional[Any]=0.1 , __lowercase : Dict=5_12 , __lowercase : Tuple=16 , __lowercase : int=2 , __lowercase : List[str]=0.02 , __lowercase : Union[str, Any]=3 , __lowercase : str=4 , __lowercase : int=None , ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str =parent
SCREAMING_SNAKE_CASE__ : List[Any] =batch_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =seq_length
SCREAMING_SNAKE_CASE__ : int =is_training
SCREAMING_SNAKE_CASE__ : int =use_input_mask
SCREAMING_SNAKE_CASE__ : List[str] =use_token_type_ids
SCREAMING_SNAKE_CASE__ : Tuple =use_labels
SCREAMING_SNAKE_CASE__ : int =vocab_size
SCREAMING_SNAKE_CASE__ : List[Any] =hidden_size
SCREAMING_SNAKE_CASE__ : Optional[int] =num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] =num_attention_heads
SCREAMING_SNAKE_CASE__ : List[str] =intermediate_size
SCREAMING_SNAKE_CASE__ : Dict =hidden_act
SCREAMING_SNAKE_CASE__ : Dict =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] =max_position_embeddings
SCREAMING_SNAKE_CASE__ : int =type_vocab_size
SCREAMING_SNAKE_CASE__ : Tuple =type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Optional[int] =initializer_range
SCREAMING_SNAKE_CASE__ : int =num_labels
SCREAMING_SNAKE_CASE__ : Optional[int] =num_choices
SCREAMING_SNAKE_CASE__ : int =scope
def __magic_name__ ( self : str ) -> List[str]:
return MPNetConfig.from_pretrained('''microsoft/mpnet-base''' )
def __magic_name__ ( self : str ) -> Any:
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Any =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : Optional[Any] =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =None
SCREAMING_SNAKE_CASE__ : Any =None
SCREAMING_SNAKE_CASE__ : List[str] =None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : int =ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : str =ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : Any =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self : Any ) -> Dict:
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __magic_name__ ( self : List[Any] , __lowercase : List[Any] , __lowercase : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Tuple , __lowercase : Optional[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ : int =MPNetModel(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : int =model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__ ( self : int , __lowercase : List[Any] , __lowercase : int , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : int , __lowercase : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Any =MPNetForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple =model(
__lowercase , attention_mask=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : Tuple , __lowercase : Any , __lowercase : Optional[Any] , __lowercase : Tuple , __lowercase : Tuple , __lowercase : List[Any] , __lowercase : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.num_labels
SCREAMING_SNAKE_CASE__ : int =MPNetForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Union[str, Any] , __lowercase : Optional[Any] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Tuple , __lowercase : Any , __lowercase : List[str] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : List[str] =self.num_choices
SCREAMING_SNAKE_CASE__ : Dict =MPNetForMultipleChoice(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : str =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Dict =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Tuple =model(
__lowercase , attention_mask=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self : Tuple , __lowercase : Optional[int] , __lowercase : Any , __lowercase : List[Any] , __lowercase : Optional[Any] , __lowercase : int , __lowercase : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Any =self.num_labels
SCREAMING_SNAKE_CASE__ : Dict =MPNetForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : Tuple ) -> str:
SCREAMING_SNAKE_CASE__ : List[Any] =self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) : Union[str, Any] =config_and_inputs
SCREAMING_SNAKE_CASE__ : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
snake_case_ = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"""feature-extraction""": MPNetModel,
"""fill-mask""": MPNetForMaskedLM,
"""question-answering""": MPNetForQuestionAnswering,
"""text-classification""": MPNetForSequenceClassification,
"""token-classification""": MPNetForTokenClassification,
"""zero-shot""": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = True
def __magic_name__ ( self : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ : List[str] =MPNetModelTester(self )
SCREAMING_SNAKE_CASE__ : Any =ConfigTester(self , config_class=__lowercase , hidden_size=37 )
def __magic_name__ ( self : List[str] ) -> Tuple:
self.config_tester.run_common_tests()
def __magic_name__ ( self : Tuple ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*__lowercase )
def __magic_name__ ( self : List[Any] ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*__lowercase )
def __magic_name__ ( self : List[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*__lowercase )
def __magic_name__ ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*__lowercase )
def __magic_name__ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*__lowercase )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def __magic_name__ ( self : Dict ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : int =MPNetModel.from_pretrained('''microsoft/mpnet-base''' )
SCREAMING_SNAKE_CASE__ : Dict =torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
SCREAMING_SNAKE_CASE__ : List[Any] =model(__lowercase )[0]
SCREAMING_SNAKE_CASE__ : str =torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , __lowercase )
SCREAMING_SNAKE_CASE__ : int =torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowercase , atol=1e-4 ) )
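# Illustrative usage sketch (added; mirrors the integration test above and assumes
# access to the Hugging Face Hub):
#
# from transformers import AutoModel, AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained('microsoft/mpnet-base')
# model = AutoModel.from_pretrained('microsoft/mpnet-base')
# outputs = model(**tokenizer('Hello world', return_tensors='pt'))
# print(outputs.last_hidden_state.shape)  # torch.Size([1, seq_len, 768])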
| 715 |
'''simple docstring'''
def _a( UpperCamelCase__ : str, UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple =[[False for _ in range(m + 1 )] for _ in range(n + 1 )]
SCREAMING_SNAKE_CASE__ : List[Any] =True
for i in range(UpperCamelCase__ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
SCREAMING_SNAKE_CASE__ : Optional[int] =True
if a[i].islower():
SCREAMING_SNAKE_CASE__ : List[Any] =True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
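# Self-contained sketch (added; `can_abbreviate` is an assumed name, not from the
# original file): the same bottom-up DP as above with readable parameter names.
# dp[i][j] is True when some subset of lowercase letters in a[:i] can be uppercased,
# and the rest deleted, to produce b[:j].
def can_abbreviate(a: str, b: str) -> bool:
    n, m = len(a), len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True  # the empty prefix of `a` yields the empty prefix of `b`
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True  # uppercase (or keep) a[i] to match b[j]
                if a[i].islower():
                    dp[i + 1][j] = True  # delete the lowercase a[i]
    return dp[n][m]

if __name__ == "__main__":
    print(can_abbreviate('daBcd', 'ABC'))  # True
    print(can_abbreviate('dBcd', 'ABC'))  # False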
| 665 | 0 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _a( UpperCamelCase__ : List[Any], UpperCamelCase__ : Dict, UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] =0
if start < end:
SCREAMING_SNAKE_CASE__ : List[str] =randint(_lowerCAmelCase, _lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =a[end]
SCREAMING_SNAKE_CASE__ : Any =a[pivot]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =temp
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] =_in_place_partition(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
count += _in_place_quick_sort(_lowerCAmelCase, _lowerCAmelCase, p - 1 )
count += _in_place_quick_sort(_lowerCAmelCase, p + 1, _lowerCAmelCase )
return count
def _a( UpperCamelCase__ : int, UpperCamelCase__ : Optional[int], UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =0
SCREAMING_SNAKE_CASE__ : int =randint(_lowerCAmelCase, _lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple =a[end]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =a[pivot]
SCREAMING_SNAKE_CASE__ : Optional[int] =temp
SCREAMING_SNAKE_CASE__ : Any =start - 1
for index in range(_lowerCAmelCase, _lowerCAmelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
SCREAMING_SNAKE_CASE__ : Any =new_pivot_index + 1
SCREAMING_SNAKE_CASE__ : Optional[Any] =a[new_pivot_index]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =a[index]
SCREAMING_SNAKE_CASE__ : List[str] =temp
SCREAMING_SNAKE_CASE__ : List[str] =a[new_pivot_index + 1]
SCREAMING_SNAKE_CASE__ : Dict =a[end]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =temp
return new_pivot_index + 1, count
a_ = TemporaryFile()
a_ = 1_0_0 # 1000 elements are to be sorted
a_ , a_ = 0, 1 # mean and standard deviation
a_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
a_ = np.load(outfile)
a_ = len(M) - 1
a_ = _in_place_quick_sort(M, 0, r)
print(
'No of Comparisons for 100 elements selected from a standard normal distribution'
'is :'
)
print(z)
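# Self-contained sketch (added; all names below are assumptions, not from the original
# file): the same randomized quicksort with comparison counting as above, written so it
# runs on its own. Unlike the original, the random pivot is chosen once, inside the
# partition step.
def _partition_count(a, start, end):
    pivot = randint(start, end)
    a[end], a[pivot] = a[pivot], a[end]  # move the random pivot value to the end
    count = 0
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # compare the current value with the pivot value
            new_pivot_index += 1
            a[new_pivot_index], a[index] = a[index], a[new_pivot_index]
    a[new_pivot_index + 1], a[end] = a[end], a[new_pivot_index + 1]
    return new_pivot_index + 1, count

def _quick_sort_count(a, start, end):
    count = 0
    if start < end:
        p, count = _partition_count(a, start, end)
        count += _quick_sort_count(a, start, p - 1)
        count += _quick_sort_count(a, p + 1, end)
    return count

_data = [5, 2, 9, 1, 7, 3]
_comparisons = _quick_sort_count(_data, 0, len(_data) - 1)
print(_data, _comparisons)  # the list is now sorted in place; comparisons were counted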
| 716 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , __lowercase : Optional[Any] , __lowercase : List[str]=13 , __lowercase : int=7 , __lowercase : Tuple=True , __lowercase : Optional[Any]=True , __lowercase : List[Any]=True , __lowercase : str=True , __lowercase : Tuple=99 , __lowercase : Union[str, Any]=64 , __lowercase : Tuple=32 , __lowercase : Optional[Any]=5 , __lowercase : Tuple=4 , __lowercase : Optional[int]=37 , __lowercase : int="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : List[Any]=0.1 , __lowercase : List[Any]=5_12 , __lowercase : int=16 , __lowercase : Optional[int]=2 , __lowercase : Tuple=0.02 , __lowercase : List[str]=3 , __lowercase : List[str]=4 , __lowercase : List[str]=None , ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =parent
SCREAMING_SNAKE_CASE__ : Any =batch_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =seq_length
SCREAMING_SNAKE_CASE__ : Dict =is_training
SCREAMING_SNAKE_CASE__ : List[Any] =use_input_mask
SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_token_type_ids
SCREAMING_SNAKE_CASE__ : List[Any] =use_labels
SCREAMING_SNAKE_CASE__ : int =vocab_size
SCREAMING_SNAKE_CASE__ : str =hidden_size
SCREAMING_SNAKE_CASE__ : Any =embedding_size
SCREAMING_SNAKE_CASE__ : Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE__ : str =num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : str =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Any =max_position_embeddings
SCREAMING_SNAKE_CASE__ : Optional[Any] =type_vocab_size
SCREAMING_SNAKE_CASE__ : Any =type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =initializer_range
SCREAMING_SNAKE_CASE__ : str =num_labels
SCREAMING_SNAKE_CASE__ : List[str] =num_choices
SCREAMING_SNAKE_CASE__ : List[str] =scope
def __magic_name__ ( self : str ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : int =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : int =None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =None
SCREAMING_SNAKE_CASE__ : Optional[Any] =None
SCREAMING_SNAKE_CASE__ : Optional[int] =None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Tuple =ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self : List[str] ) -> Any:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
def __magic_name__ ( self : Any , __lowercase : Tuple , __lowercase : Any , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : Dict , __lowercase : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertModel(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(__lowercase , token_type_ids=__lowercase )
SCREAMING_SNAKE_CASE__ : str =model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__ ( self : Dict , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : Any , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : str =MegatronBertForMaskedLM(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : List[str] , __lowercase : Tuple , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : str , __lowercase : Any , __lowercase : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =MegatronBertForNextSentencePrediction(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : List[str] , __lowercase : Optional[int] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[Any] , __lowercase : str , __lowercase : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForPreTraining(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , next_sentence_label=__lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : Optional[int] , __lowercase : Tuple , __lowercase : Optional[int] , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any , __lowercase : Any , __lowercase : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] =MegatronBertForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : int , __lowercase : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.num_labels
SCREAMING_SNAKE_CASE__ : Dict =MegatronBertForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : int ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Tuple =self.num_labels
SCREAMING_SNAKE_CASE__ : int =MegatronBertForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : Dict , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Any , __lowercase : List[Any] , __lowercase : int , __lowercase : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : int =self.num_choices
SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertForMultipleChoice(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[int] =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Tuple =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self : str ) -> Any:
SCREAMING_SNAKE_CASE__ : Tuple =self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) : str =config_and_inputs
SCREAMING_SNAKE_CASE__ : Union[str, Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
snake_case_ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = True
# test_resize_embeddings = False
snake_case_ = False
def __magic_name__ ( self : List[Any] , __lowercase : List[str] , __lowercase : Tuple , __lowercase : Tuple=False ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
if return_labels:
if model_class in get_values(__lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] =torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
return inputs_dict
def __magic_name__ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =MegatronBertModelTester(self )
SCREAMING_SNAKE_CASE__ : Tuple =ConfigTester(self , config_class=__lowercase , hidden_size=37 )
def __magic_name__ ( self : str ) -> Dict:
self.config_tester.run_common_tests()
def __magic_name__ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__lowercase )
def __magic_name__ ( self : List[str] ) -> Any:
SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__lowercase )
def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__lowercase )
def __magic_name__ ( self : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__lowercase )
def __magic_name__ ( self : Dict ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__lowercase )
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__lowercase )
def __magic_name__ ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__lowercase )
def _long_tensor( tok_lst : List[List[int]] ):
    '''simple docstring'''
    # named _long_tensor because the call site below uses that name; device is the
    # usual torch_device from transformers.testing_utils, not the token list itself
    return torch.tensor(
        tok_lst, dtype=torch.long, device=torch_device, )
a_ = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
@unittest.skip('''Model is not available.''' )
def __magic_name__ ( self : Any ) -> Optional[int]:
        directory ='''nvidia/megatron-bert-uncased-345m'''
        if "MYDIR" in os.environ:
            directory =os.path.join(os.environ['''MYDIR'''] , directory )
        model =MegatronBertModel.from_pretrained(directory )
        model.to(torch_device )
        model.half()
        input_ids =_long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
        with torch.no_grad():
            output =model(input_ids )[0]
        expected_shape =torch.Size((1, 9, 10_24) )
        self.assertEqual(output.shape , expected_shape )
        expected =[-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3 ):
            for jj in range(3 ):
                a =output[0, ii, jj].item()
                b =expected[3 * ii + jj]
                msg ='''ii={} jj={} a={} b={}'''.format(ii , jj , a , b )
                self.assertTrue(math.isclose(a , b , rel_tol=a_ , abs_tol=a_ ) , msg=msg )
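# A minimal standalone sketch of the tolerance check above: the loop compares a
# 3x3 slice element-wise with math.isclose; torch.allclose expresses a comparable
# (not byte-identical) bound in one call. The tensor values here are illustrative,
# not real model outputs.
import torch

output_slice = torch.tensor([[-0.6040, -0.2517, -0.1025], [0.3420, -0.6758, -0.0017], [-0.1089, -0.1990, 0.5728]])
expected_values = output_slice.clone()
assert torch.allclose(output_slice, expected_values, rtol=1e-4, atol=1e-4)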
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , __lowercase : Tuple , __lowercase : Dict=99 , __lowercase : Tuple=13 , __lowercase : Union[str, Any]=7 , __lowercase : int=9 , __lowercase : Optional[int]=True , __lowercase : Any=True , __lowercase : str=False , __lowercase : Optional[int]=32 , __lowercase : Optional[Any]=5 , __lowercase : Dict=4 , __lowercase : Dict=37 , __lowercase : Tuple=8 , __lowercase : List[Any]=0.1 , __lowercase : str=0.002 , __lowercase : List[Any]=1 , __lowercase : int=0 , __lowercase : Optional[Any]=0 , __lowercase : Optional[int]=None , __lowercase : List[str]=None , ) -> Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =parent
SCREAMING_SNAKE_CASE__ : int =batch_size
SCREAMING_SNAKE_CASE__ : Dict =encoder_seq_length
SCREAMING_SNAKE_CASE__ : Tuple =decoder_seq_length
# For common tests
SCREAMING_SNAKE_CASE__ : str =self.decoder_seq_length
SCREAMING_SNAKE_CASE__ : Any =is_training
SCREAMING_SNAKE_CASE__ : Optional[int] =use_attention_mask
SCREAMING_SNAKE_CASE__ : int =use_labels
SCREAMING_SNAKE_CASE__ : Optional[int] =vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =hidden_size
SCREAMING_SNAKE_CASE__ : List[str] =num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] =num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] =d_ff
SCREAMING_SNAKE_CASE__ : int =relative_attention_num_buckets
SCREAMING_SNAKE_CASE__ : int =dropout_rate
SCREAMING_SNAKE_CASE__ : Optional[int] =initializer_factor
SCREAMING_SNAKE_CASE__ : List[str] =eos_token_id
SCREAMING_SNAKE_CASE__ : int =pad_token_id
SCREAMING_SNAKE_CASE__ : Tuple =decoder_start_token_id
SCREAMING_SNAKE_CASE__ : str =None
SCREAMING_SNAKE_CASE__ : int =decoder_layers
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
return TaConfig.from_pretrained('''google/umt5-base''' )
def __magic_name__ ( self : List[Any] , __lowercase : str , __lowercase : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Union[str, Any]=None , __lowercase : Dict=None , __lowercase : str=None , __lowercase : Any=None , __lowercase : Dict=None , ) -> List[Any]:
if attention_mask is None:
SCREAMING_SNAKE_CASE__ : str =input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE__ : Tuple =decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
SCREAMING_SNAKE_CASE__ : int =torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__lowercase )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__lowercase )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE__ : Tuple =torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__lowercase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __magic_name__ ( self : Union[str, Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : List[str] =ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
SCREAMING_SNAKE_CASE__ : List[Any] =input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =decoder_input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE__ : List[Any] =self.get_config()
SCREAMING_SNAKE_CASE__ : str =config.num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple =self.prepare_inputs_dict(__lowercase , __lowercase , __lowercase )
return config, input_dict
def __magic_name__ ( self : int ) -> List[str]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =self.prepare_config_and_inputs()
return config, inputs_dict
def __magic_name__ ( self : List[str] ) -> Optional[Any]:
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __magic_name__ ( self : Optional[Any] ) -> Optional[int]:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __magic_name__ ( self : Optional[Any] , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : List[str] , __lowercase : Any , __lowercase : Optional[int] , __lowercase : Tuple , ) -> str:
SCREAMING_SNAKE_CASE__ : Tuple =UMTaModel(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(
input_ids=__lowercase , decoder_input_ids=__lowercase , attention_mask=__lowercase , decoder_attention_mask=__lowercase , )
SCREAMING_SNAKE_CASE__ : Dict =model(input_ids=__lowercase , decoder_input_ids=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =result.last_hidden_state
SCREAMING_SNAKE_CASE__ : Any =result.past_key_values
SCREAMING_SNAKE_CASE__ : List[str] =result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__lowercase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def __magic_name__ ( self : Any , __lowercase : Tuple , __lowercase : str , __lowercase : Dict , __lowercase : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : Optional[int] , ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =UMTaModel(config=__lowercase ).get_decoder().to(__lowercase ).eval()
# first forward pass
SCREAMING_SNAKE_CASE__ : str =model(__lowercase , use_cache=__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =model(__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =model(__lowercase , use_cache=__lowercase )
self.parent.assertTrue(len(__lowercase ) == len(__lowercase ) )
self.parent.assertTrue(len(__lowercase ) == len(__lowercase ) + 1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =outputs.to_tuple()
        # create a hypothetical next token and extend next_input_ids with it
SCREAMING_SNAKE_CASE__ : str =ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase )['''last_hidden_state''']
SCREAMING_SNAKE_CASE__ : List[Any] =model(__lowercase , past_key_values=__lowercase )['''last_hidden_state''']
# select random slice
SCREAMING_SNAKE_CASE__ : Dict =ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Tuple =output_from_no_past[:, -1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1e-3 ) )
def __magic_name__ ( self : Any , __lowercase : str , __lowercase : str , ) -> int:
SCREAMING_SNAKE_CASE__ : Dict =UMTaModel(config=__lowercase ).to(__lowercase ).half().eval()
SCREAMING_SNAKE_CASE__ : str =model(**__lowercase )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(__lowercase ).any().item() )
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
snake_case_ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
snake_case_ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
snake_case_ = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = True
snake_case_ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
snake_case_ = [0.8, 0.9]
def __magic_name__ ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : int =UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def __magic_name__ ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[str] =self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =UMTaModel(config_and_inputs[0] ).to(__lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__lowercase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=__lowercase , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
    @unittest.skipIf(torch_device == '''cpu''' , '''Can't do half precision''' )
def __magic_name__ ( self : Optional[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__lowercase )
def __magic_name__ ( self : Optional[int] ) -> Any:
        attention_names =['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
        config_and_inputs =self.model_tester.prepare_config_and_inputs()
        config =config_and_inputs[0]
        model =UMTaForConditionalGeneration(config ).eval()
        model.to(torch_device )
        head_masking ={
            '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=torch_device ),
            '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
            '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
        }
        for attn_name, (name, mask) in zip(attention_names , head_masking.items() ):
            head_masks ={name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks['''decoder_head_mask'''] =torch.ones(
                    config.num_decoder_layers , config.num_heads , device=torch_device )
            out =model.generate(
                config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=True , return_dict_in_generate=True , **head_masks , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights =out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def __magic_name__ ( self : Optional[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ : Tuple =UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=__lowercase ).to(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=__lowercase , legacy=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =[
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer(__lowercase , return_tensors='''pt''' , padding=__lowercase ).input_ids
# fmt: off
SCREAMING_SNAKE_CASE__ : Any =torch.tensor(
[
            [3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0],
            [5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33, 6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96, 2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : str =model.generate(input_ids.to(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
SCREAMING_SNAKE_CASE__ : Dict =tokenizer.batch_decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
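# A standalone sketch of the mask-defaulting pattern in prepare_inputs_dict above:
# when no attention_mask is given, every position equal to pad_token_id is masked
# out. The ids below are illustrative.
import torch

pad_token_id = 0
input_ids = torch.tensor([[11, 12, 13, pad_token_id, pad_token_id]])
attention_mask = input_ids.ne(pad_token_id)
assert attention_mask.tolist() == [[True, True, True, False, False]]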
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def setUp( self ) -> None:
        mod_file =inspect.getfile(accelerate.test_utils )
        self.test_file_path =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
        self.test_dir =os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
    def test_tpu( self ) -> None:
        distributed_args =F"\n    {self.test_dir}/xla_spawn.py\n    --num_cores 8\n    {self.test_file_path}\n    ".split()
        cmd =[sys.executable] + distributed_args
        execute_subprocess_async(cmd , env=os.environ.copy() )
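# A hedged sketch of what the test above drives: launching a worker script in a
# child process. It uses the stdlib instead of accelerate's
# execute_subprocess_async helper, and both script paths are placeholders rather
# than real files.
import os
import subprocess
import sys

cmd = [sys.executable, "xla_spawn.py", "--num_cores", "8", "test_script.py"]  # placeholder paths
completed = subprocess.run(cmd, env=os.environ.copy(), check=False)  # returncode != 0 until real paths are used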
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , __lowercase : int , __lowercase : Any=13 , __lowercase : str=30 , __lowercase : str=2 , __lowercase : Optional[int]=3 , __lowercase : List[Any]=True , __lowercase : Optional[int]=True , __lowercase : Optional[int]=32 , __lowercase : int=5 , __lowercase : Optional[int]=4 , __lowercase : Any=37 , __lowercase : int="gelu" , __lowercase : Optional[int]=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Tuple=10 , __lowercase : Any=0.02 , __lowercase : Optional[Any]=3 , __lowercase : Dict=None , __lowercase : Dict=2 , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple =parent
SCREAMING_SNAKE_CASE__ : Tuple =batch_size
SCREAMING_SNAKE_CASE__ : List[str] =image_size
SCREAMING_SNAKE_CASE__ : List[str] =patch_size
SCREAMING_SNAKE_CASE__ : Dict =num_channels
SCREAMING_SNAKE_CASE__ : Dict =is_training
SCREAMING_SNAKE_CASE__ : List[Any] =use_labels
SCREAMING_SNAKE_CASE__ : List[Any] =hidden_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =num_hidden_layers
SCREAMING_SNAKE_CASE__ : Any =num_attention_heads
SCREAMING_SNAKE_CASE__ : str =intermediate_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Any =type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Optional[int] =initializer_range
SCREAMING_SNAKE_CASE__ : Tuple =scope
SCREAMING_SNAKE_CASE__ : List[Any] =encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
SCREAMING_SNAKE_CASE__ : Optional[int] =(image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : Tuple =num_patches + 2
def __magic_name__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Any =None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : int =ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : str =self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Any ) -> Dict:
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __magic_name__ ( self : Dict , __lowercase : str , __lowercase : List[str] , __lowercase : List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =DeiTModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : int , __lowercase : List[Any] , __lowercase : Tuple , __lowercase : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =DeiTForMaskedImageModeling(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict =model(lowerCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ : int =1
SCREAMING_SNAKE_CASE__ : Optional[int] =DeiTForMaskedImageModeling(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE__ : int =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Any =model(lowerCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __magic_name__ ( self : Optional[int] , __lowercase : str , __lowercase : Any , __lowercase : int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict =self.type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =DeiTForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE__ : Any =model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ : List[str] =1
SCREAMING_SNAKE_CASE__ : Tuple =DeiTForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE__ : str =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[str] =model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
        config_and_inputs =self.prepare_config_and_inputs()
        config, pixel_values, labels =config_and_inputs
        inputs_dict ={"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
snake_case_ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
def __magic_name__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =DeiTModelTester(self )
SCREAMING_SNAKE_CASE__ : Tuple =ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def __magic_name__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def __magic_name__ ( self : int ) -> List[str]:
'''simple docstring'''
pass
def __magic_name__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Dict =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def __magic_name__ ( self : int ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str =model_class(lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[str] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Tuple =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Tuple =["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def __magic_name__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def __magic_name__ ( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase__ )
def __magic_name__ ( self : int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
def __magic_name__ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : List[str]=False ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =super()._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __magic_name__ ( self : Tuple ) -> str:
'''simple docstring'''
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowerCamelCase__ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE__ : List[str] =model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
SCREAMING_SNAKE_CASE__ : List[str] =self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any =model(**lowerCamelCase__ ).loss
loss.backward()
def __magic_name__ ( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ : int =False
SCREAMING_SNAKE_CASE__ : Tuple =True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCamelCase__ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE__ : str =model_class(lowerCamelCase__ )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase__ )
model.train()
SCREAMING_SNAKE_CASE__ : str =self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(**lowerCamelCase__ ).loss
loss.backward()
def __magic_name__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowerCamelCase__ ),
*get_values(lowerCamelCase__ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
SCREAMING_SNAKE_CASE__ : Any =problem_type["title"]
SCREAMING_SNAKE_CASE__ : Dict =problem_type["num_labels"]
SCREAMING_SNAKE_CASE__ : str =model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
SCREAMING_SNAKE_CASE__ : Tuple =self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE__ : List[Any] =inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =inputs["labels"].to(problem_type['''dtype'''] )
                    # This tests that we do not trigger the PyTorch warning "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which would be a symptom that something is wrong with the regression
                    # problem. See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCamelCase__ ) as warning_list:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(**lowerCamelCase__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def __magic_name__ ( self : str ) -> Optional[int]:
'''simple docstring'''
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : int =DeiTModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : Any ) -> str:
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to(
lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] =self.default_image_processor
SCREAMING_SNAKE_CASE__ : Optional[Any] =prepare_img()
SCREAMING_SNAKE_CASE__ : Dict =image_processor(images=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] =model(**lowerCamelCase__ )
# verify the logits
SCREAMING_SNAKE_CASE__ : int =torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any =torch.tensor([-1.0266, 0.1912, -1.2861] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __magic_name__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict =DeiTModel.from_pretrained(
'''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.floataa , device_map='''auto''' )
SCREAMING_SNAKE_CASE__ : Optional[int] =self.default_image_processor
SCREAMING_SNAKE_CASE__ : int =prepare_img()
SCREAMING_SNAKE_CASE__ : Dict =image_processor(images=lowerCamelCase__ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : Any =inputs.pixel_values.to(lowerCamelCase__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] =model(lowerCamelCase__ )
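# A small worked example of the sequence-length arithmetic from DeiTModelTester
# above: DeiT prepends a [CLS] token and a distillation token to the patch sequence.
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2  # 15 * 15 = 225 patches
seq_length = num_patches + 2  # +2 for the [CLS] and distillation tokens
assert seq_length == 227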
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = ShapEImgaImgPipeline
snake_case_ = ["""image"""]
snake_case_ = ["""image"""]
snake_case_ = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
snake_case_ = False
@property
def __magic_name__ ( self : List[Any] ) -> List[Any]:
return 32
@property
def __magic_name__ ( self : List[str] ) -> Optional[int]:
return 32
@property
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Dict ) -> Union[str, Any]:
return 8
@property
def __magic_name__ ( self : Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
SCREAMING_SNAKE_CASE__ : str =CLIPVisionModel(__lowercase )
return model
@property
def __magic_name__ ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ : int =CLIPImageProcessor(
crop_size=2_24 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , )
return image_processor
@property
def __magic_name__ ( self : List[str] ) -> Dict:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : str ={
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
SCREAMING_SNAKE_CASE__ : str =PriorTransformer(**__lowercase )
return model
@property
def __magic_name__ ( self : Tuple ) -> List[str]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE__ : str =ShapERenderer(**__lowercase )
return model
def __magic_name__ ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int =self.dummy_prior
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_encoder
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_processor
SCREAMING_SNAKE_CASE__ : Tuple =self.dummy_renderer
SCREAMING_SNAKE_CASE__ : int =HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=__lowercase , clip_sample=__lowercase , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE__ : Any ={
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __magic_name__ ( self : Any , __lowercase : List[str] , __lowercase : Any=0 ) -> Any:
SCREAMING_SNAKE_CASE__ : int =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
if str(__lowercase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ : List[str] =torch.manual_seed(__lowercase )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
SCREAMING_SNAKE_CASE__ : Any ={
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def __magic_name__ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ : int ='''cpu'''
SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : str =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Any =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =output.images[0]
SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : List[Any] =np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self : List[Any] ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __magic_name__ ( self : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch_device == '''cpu'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__lowercase , relax_max_difference=__lowercase , )
def __magic_name__ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =1
SCREAMING_SNAKE_CASE__ : List[str] =2
SCREAMING_SNAKE_CASE__ : Dict =self.get_dummy_inputs(__lowercase )
for key in inputs.keys():
if key in self.batch_params:
SCREAMING_SNAKE_CASE__ : Tuple =batch_size * [inputs[key]]
SCREAMING_SNAKE_CASE__ : List[Any] =pipe(**__lowercase , num_images_per_prompt=__lowercase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : Optional[Any] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : List[str] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
SCREAMING_SNAKE_CASE__ : Dict =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
SCREAMING_SNAKE_CASE__ : List[Any] =ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
SCREAMING_SNAKE_CASE__ : Tuple =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple =pipe(
__lowercase , generator=__lowercase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__lowercase , __lowercase )
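# A reusable sketch of the generator pattern in get_dummy_inputs above: per-device
# torch.Generator objects were unsupported on "mps" when these tests were written,
# hence the fallback to the global CPU generator.
import torch

def seeded_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # global generator as a fallback on Apple Silicon
    return torch.Generator(device=device).manual_seed(seed)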
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( __a , unittest.TestCase ):
snake_case_ = GPTaTokenizer
snake_case_ = GPTaTokenizerFast
snake_case_ = True
snake_case_ = {"add_prefix_space": True}
snake_case_ = False
def __magic_name__ ( self : Optional[int] ) -> Dict:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE__ : Any =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
SCREAMING_SNAKE_CASE__ : List[Any] =dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
SCREAMING_SNAKE_CASE__ : str =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
SCREAMING_SNAKE_CASE__ : Any ={'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE__ : str =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE__ : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCAmelCase_ ) )
def __magic_name__ ( self : Optional[int] , **__lowercase : int ) -> str:
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __magic_name__ ( self : Union[str, Any] , **__lowercase : Union[str, Any] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __magic_name__ ( self : List[Any] , __lowercase : int ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str ='''lower newer'''
SCREAMING_SNAKE_CASE__ : str ='''lower newer'''
return input_text, output_text
def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[int] =GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE__ : Optional[Any] ='''lower newer'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer.tokenize(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : List[Any] =tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ : Optional[Any] =[14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ )
def __magic_name__ ( self : Optional[Any] ) -> Any:
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Tuple =self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] ='''lower newer'''
# Testing tokenization
SCREAMING_SNAKE_CASE__ : str =tokenizer.tokenize(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Optional[int] =rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing conversion to ids without special tokens
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing conversion to ids with special tokens
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : str =tokenizer.encode(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing the unknown token
SCREAMING_SNAKE_CASE__ : Optional[int] =tokens + [rust_tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ )
def __magic_name__ ( self : Dict , *__lowercase : Optional[Any] , **__lowercase : List[str] ) -> Optional[int]:
pass
def __magic_name__ ( self : List[str] , __lowercase : List[str]=15 ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE__ : List[Any] =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
# Simple input
SCREAMING_SNAKE_CASE__ : Any ='''This is a simple input'''
SCREAMING_SNAKE_CASE__ : List[Any] =['''This is a simple input 1''', '''This is a simple input 2''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] =('''This is a simple input''', '''This is a pair''')
SCREAMING_SNAKE_CASE__ : List[Any] =[
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='''max_length''' )
# Simple input
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='''max_length''' )
# Simple input
self.assertRaises(
lowerCAmelCase_ , tokenizer_r.batch_encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='''max_length''' , )
# Pair input
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='''max_length''' )
# Pair input
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='''max_length''' )
# Pair input
self.assertRaises(
lowerCAmelCase_ , tokenizer_r.batch_encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='''max_length''' , )
def __magic_name__ ( self : Union[str, Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : int =GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
SCREAMING_SNAKE_CASE__ : int ='''This is a simple input'''
SCREAMING_SNAKE_CASE__ : int =['''This is a simple input looooooooong''', '''This is a simple input''']
SCREAMING_SNAKE_CASE__ : Tuple =('''This is a simple input''', '''This is a pair''')
SCREAMING_SNAKE_CASE__ : int =[
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.pad_token_id
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer(lowerCAmelCase_ , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ : str =tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , truncate=lowerCAmelCase_ , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ : int =tokenizer(*lowerCAmelCase_ , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , truncate=lowerCAmelCase_ , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def __magic_name__ ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str ='''$$$'''
SCREAMING_SNAKE_CASE__ : Any =GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=lowerCAmelCase_ , add_bos_token=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] ='''This is a simple input'''
SCREAMING_SNAKE_CASE__ : int =['''This is a simple input 1''', '''This is a simple input 2''']
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.bos_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer(lowerCAmelCase_ )
self.assertEqual(out_s.input_ids[0] , lowerCAmelCase_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.decode(out_s.input_ids )
SCREAMING_SNAKE_CASE__ : Dict =tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , lowerCAmelCase_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __magic_name__ ( self : List[str] ) -> Optional[int]:
pass
def __magic_name__ ( self : Any ) -> Any:
SCREAMING_SNAKE_CASE__ : Optional[int] =[self.get_tokenizer(do_lower_case=lowerCAmelCase_ , add_bos_token=lowerCAmelCase_ )]
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE__ : str ='''Encode this.'''
SCREAMING_SNAKE_CASE__ : int ='''This one too please.'''
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
encoded_sequence += tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Dict =tokenizer.encode_plus(
lowerCAmelCase_ , lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =encoded_sequence_dict['''input_ids''']
SCREAMING_SNAKE_CASE__ : Tuple =encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ : Any =[
(x if not special_tokens_mask[i] else None) for i, x in enumerate(lowerCAmelCase_ )
]
SCREAMING_SNAKE_CASE__ : str =[x for x in filtered_sequence if x is not None]
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Dict =AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] ='''A photo of a cat'''
SCREAMING_SNAKE_CASE__ : Dict =tokenizer.encode(
lowerCAmelCase_ , )
self.assertEqual(lowerCAmelCase_ , [2, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained('''test_opt''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] =AutoTokenizer.from_pretrained('''./test_opt''' )
SCREAMING_SNAKE_CASE__ : Any =tokenizer.encode(
lowerCAmelCase_ , )
self.assertEqual(lowerCAmelCase_ , [2, 2_50, 13_45, 9, 10, 47_58] )
def __magic_name__ ( self : int ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] =AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : str ='''A photo of a cat'''
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.encode(
lowerCAmelCase_ , )
# Same as above
self.assertEqual(lowerCAmelCase_ , [2, 2_50, 13_45, 9, 10, 47_58] )
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
def __magic_name__ ( self : Dict ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[int] =AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : List[Any] ='''bos'''
SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.get_vocab()['''bos''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''A photo of a cat'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer.encode(
lowerCAmelCase_ , )
# We changed the bos token
self.assertEqual(lowerCAmelCase_ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained('''./tok''' )
SCREAMING_SNAKE_CASE__ : List[Any] =AutoTokenizer.from_pretrained('''./tok''' )
self.assertTrue(tokenizer.is_fast )
SCREAMING_SNAKE_CASE__ : Dict =tokenizer.encode(
lowerCAmelCase_ , )
self.assertEqual(lowerCAmelCase_ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
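# A self-contained sketch of the fixture layout that setUp writes above: a GPT-2
# style tokenizer is fully described by a vocab.json and a merges.txt. The toy
# vocabulary and merges below are illustrative, not the real GPT-2 files.
import json
import os
import tempfile

tmpdir = tempfile.mkdtemp()
with open(os.path.join(tmpdir, "vocab.json"), "w", encoding="utf-8") as fp:
    json.dump({"l": 0, "o": 1, "w": 2, "lo": 3, "low": 4, "<unk>": 5}, fp)
with open(os.path.join(tmpdir, "merges.txt"), "w", encoding="utf-8") as fp:
    fp.write("\n".join(["#version: 0.2", "l o", "lo w"]))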
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """gpt_bigcode"""
snake_case_ = ["""past_key_values"""]
snake_case_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self , vocab_size=5_02_57 , n_positions=10_24 , n_embd=7_68 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=5_02_56 , eos_token_id=5_02_56 , attention_softmax_in_fpaa=True , scale_attention_softmax_in_fpaa=True , multi_query=True , **kwargs , ) -> None:
        self.vocab_size =vocab_size
        self.n_positions =n_positions
        self.n_embd =n_embd
        self.n_layer =n_layer
        self.n_head =n_head
        self.n_inner =n_inner
        self.activation_function =activation_function
        self.resid_pdrop =resid_pdrop
        self.embd_pdrop =embd_pdrop
        self.attn_pdrop =attn_pdrop
        self.layer_norm_epsilon =layer_norm_epsilon
        self.initializer_range =initializer_range
        self.scale_attn_weights =scale_attn_weights
        self.use_cache =use_cache
        self.attention_softmax_in_fpaa =attention_softmax_in_fpaa
        self.scale_attention_softmax_in_fpaa =scale_attention_softmax_in_fpaa
        self.multi_query =multi_query
        self.bos_token_id =bos_token_id
        self.eos_token_id =eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
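# --- Illustrative usage sketch (added; not part of the original file) ---
# `__SCREAMING_SNAKE_CASE` above is the GPT-BigCode-style config; the keyword
# names follow its __init__, and `attribute_map` translates the HF-standard
# names (hidden_size -> n_embd, num_hidden_layers -> n_layer). The override
# values below are arbitrary and chosen only for illustration.
def _config_demo() -> None:
    config =__SCREAMING_SNAKE_CASE(n_embd=10_24 , n_layer=24 , multi_query=True )
    assert config.hidden_size == 10_24        # resolved through attribute_map
    assert config.num_hidden_layers == 24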
| 665 | 0 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '\nimport os\n'
IMPORT_IN_FUNCTION = '\ndef foo():\n    import os\n    return False\n'
DEEPLY_NESTED_IMPORT = '\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n'
TOP_LEVEL_TRY_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n'
TRY_IMPORT_IN_FUNCTION = '\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n'
MULTIPLE_EXCEPTS_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n'
EXCEPT_AS_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n'
GENERIC_EXCEPT_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n'
MULTILINE_TRY_IMPORT = '\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n'
MULTILINE_BOTH_IMPORT = '\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n'
a_ = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''', a_ )
def _a( tmp_path, case ):
    '''simple docstring'''
    tmp_file_path =os.path.join(tmp_path , '''test_file.py''' )
    with open(tmp_file_path , '''w''' ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports =get_imports(tmp_file_path )
    assert parsed_imports == ["os"]
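# --- Illustrative sketch (added; not part of the original test file) ---
# get_imports reports only unconditional, top-level imports, which is why every
# case above parses to exactly ["os"]: imports nested in functions or wrapped
# in try/except (the optional-dependency pattern) are deliberately skipped.
def _demo_get_imports(tmp_path ) -> None:
    demo_path =os.path.join(tmp_path , '''demo.py''' )
    with open(demo_path , '''w''' ) as f:
        f.write(TOP_LEVEL_TRY_IMPORT )
    assert get_imports(demo_path ) == ["os"]  # `bar` is guarded, so skipped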
| 720 |
'''simple docstring'''
class __SCREAMING_SNAKE_CASE :
    def __init__( self , size : int ) -> None:
        self.size =size
        self.arr =[0] * size
        self.tree =[0] * size
    @staticmethod
    def get_next( index : int ) -> int:
        return index | (index + 1)
    @staticmethod
    def get_prev( index : int ) -> int:
        return (index & (index + 1)) - 1
    def update( self , index : int , value : int ) -> None:
        self.arr[index] =value
        while index < self.size:
            current_left_border =self.get_prev(index ) + 1
            if current_left_border == index:
                self.tree[index] =value
            else:
                # recompute the range maximum for the node covering
                # [current_left_border, index]
                self.tree[index] =max(value , self.query(current_left_border , index ) )
            index =self.get_next(index )
    def query( self , left : int , right : int ) -> int:
        right -= 1  # because `right` is exclusive
        result =0
        while left <= right:
            current_left =self.get_prev(right )
            if left <= current_left:
                result =max(result , self.tree[right] )
                right =current_left
            else:
                result =max(result , self.arr[right] )
                right -= 1
        return result
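# --- Illustrative usage sketch (added; not part of the original file) ---
# Point updates with range-max queries; `query(left, right)` treats `right`
# as exclusive, mirroring Python slicing.
def _demo_max_fenwick() -> None:
    ft =__SCREAMING_SNAKE_CASE(8 )
    for i, v in enumerate([2, 7, 1, 8, 2, 8, 1, 8] ):
        ft.update(i , v )
    assert ft.query(0 , 8 ) == 8
    assert ft.query(2 , 5 ) == 8   # max of [1, 8, 2]
    assert ft.query(4 , 5 ) == 2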
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 0 |
import numpy as np
def _a( vector : np.array ):
    '''simple docstring'''
    return 1 / (1 + np.exp(-vector ))
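# --- Illustrative usage sketch (added; not part of the original file) ---
# np.exp broadcasts, so the function maps elementwise over arrays.
def _demo_sigmoid() -> None:
    out =_a(np.array([-2.0, 0.0, 2.0] ) )
    assert abs(out[1] - 0.5 ) < 1e-12
    assert np.all((out > 0 ) & (out < 1 ) )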
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = ["""vqvae"""]
def __init__( self : int , __lowercase : AutoencoderKL , __lowercase : UNetaDConditionModel , __lowercase : Mel , __lowercase : Union[DDIMScheduler, DDPMScheduler] , ) -> int:
super().__init__()
self.register_modules(unet=__lowercase , scheduler=__lowercase , mel=__lowercase , vqvae=__lowercase )
def __magic_name__ ( self : List[str] ) -> int:
return 50 if isinstance(self.scheduler , __lowercase ) else 10_00
@torch.no_grad()
def __call__( self : Dict , __lowercase : int = 1 , __lowercase : str = None , __lowercase : np.ndarray = None , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = None , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : float = 0 , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : torch.Tensor = None , __lowercase : torch.Tensor = None , __lowercase : Dict=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =steps or self.get_default_steps()
self.scheduler.set_timesteps(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
SCREAMING_SNAKE_CASE__ : Optional[int] =randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__lowercase , device=self.device , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =noise
SCREAMING_SNAKE_CASE__ : List[str] =None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Dict =self.mel.audio_slice_to_image(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
SCREAMING_SNAKE_CASE__ : int =(input_image / 2_55) * 2 - 1
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.vqvae.encode(torch.unsqueeze(__lowercase , 0 ) ).latent_dist.sample(
generator=__lowercase )[0]
SCREAMING_SNAKE_CASE__ : int =self.vqvae.config.scaling_factor * input_images
if start_step > 0:
SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , self.scheduler.timesteps[start_step - 1] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
SCREAMING_SNAKE_CASE__ : Optional[Any] =int(mask_start_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : Tuple =int(mask_end_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __lowercase ):
SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase , __lowercase )['''sample''']
else:
SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase )['''sample''']
if isinstance(self.scheduler , __lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.scheduler.step(
model_output=__lowercase , timestep=__lowercase , sample=__lowercase , eta=__lowercase , generator=__lowercase , )['''prev_sample''']
else:
SCREAMING_SNAKE_CASE__ : Any =self.scheduler.step(
model_output=__lowercase , timestep=__lowercase , sample=__lowercase , generator=__lowercase , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] =mask[:, step, :, :mask_start]
if mask_end > 0:
SCREAMING_SNAKE_CASE__ : List[str] =mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
SCREAMING_SNAKE_CASE__ : str =1 / self.vqvae.config.scaling_factor * images
SCREAMING_SNAKE_CASE__ : int =self.vqvae.decode(__lowercase )['''sample''']
SCREAMING_SNAKE_CASE__ : List[str] =(images / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
SCREAMING_SNAKE_CASE__ : Dict =(images * 2_55).round().astype('''uint8''' )
SCREAMING_SNAKE_CASE__ : Any =list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ , mode='''RGB''' ).convert('''L''' ) for _ in images) )
SCREAMING_SNAKE_CASE__ : Optional[int] =[self.mel.image_to_audio(__lowercase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowercase ) )
@torch.no_grad()
def __magic_name__ ( self : Optional[int] , __lowercase : List[Image.Image] , __lowercase : int = 50 ) -> np.ndarray:
assert isinstance(self.scheduler , __lowercase )
self.scheduler.set_timesteps(__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
SCREAMING_SNAKE_CASE__ : str =(sample / 2_55) * 2 - 1
SCREAMING_SNAKE_CASE__ : str =torch.Tensor(__lowercase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
SCREAMING_SNAKE_CASE__ : Tuple =t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
SCREAMING_SNAKE_CASE__ : Any =self.scheduler.alphas_cumprod[t]
SCREAMING_SNAKE_CASE__ : int =(
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
SCREAMING_SNAKE_CASE__ : int =1 - alpha_prod_t
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.unet(__lowercase , __lowercase )['''sample''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(1 - alpha_prod_t_prev) ** 0.5 * model_output
SCREAMING_SNAKE_CASE__ : str =(sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
SCREAMING_SNAKE_CASE__ : Any =sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
    @staticmethod
    def slerp( xa : torch.Tensor , xb : torch.Tensor , alpha : float ) -> torch.Tensor:
        theta =acos(torch.dot(torch.flatten(xa ) , torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
        return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta )
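# --- Illustrative note (added; not part of the original pipeline file) ---
# slerp (spherical linear interpolation) walks the great circle between two
# latents, so intermediate tensors keep a norm comparable to the endpoints;
# a plain linear mix of Gaussian noise would shrink toward the origin.
# Sketch (shapes are arbitrary assumptions):
# x0, x1 = torch.randn(1, 1, 64, 64), torch.randn(1, 1, 64, 64)
# halfway = __SCREAMING_SNAKE_CASE.slerp(x0, x1, 0.5)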
| 665 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
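# --- Illustrative note (added; not part of the original __init__ file) ---
# With the _LazyModule indirection above, importing the package stays cheap:
# the torch-backed submodule is only executed on first attribute access.
# Sketch (assuming this file is the __init__.py of a package `mypkg`):
# import mypkg                      # nothing heavy imported yet
# cfg = mypkg.TimesformerConfig()   # first access triggers the real import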
| 700 |
'''simple docstring'''
from math import isqrt
def calculate_prime_numbers( max_number : int ):
    '''simple docstring'''
    is_prime =[True] * max_number
    for i in range(2, isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2, max_number, i ):
                is_prime[j] =False
    return [i for i in range(2, max_number ) if is_prime[i]]
def solution( max_number : int = 1_0**8 ):
    '''simple docstring'''
    prime_numbers =calculate_prime_numbers(max_number // 2 )
    semiprimes_count =0
    left =0
    right =len(prime_numbers ) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
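# --- Illustrative check (added; not part of the original file) ---
# Two-pointer intuition: for each smallest prime factor p (index `left`),
# shrink `right` until p * q < max_number; every prime q in between gives a
# distinct semiprime p * q with p <= q, hence `right - left + 1` new counts.
def _sanity_check() -> None:
    # semiprimes below 30: 4, 6, 9, 10, 14, 15, 21, 22, 25, 26
    assert solution(30 ) == 10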
if __name__ == "__main__":
print(F'''{solution() = }''')
| 665 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__( self , components : Collection[float] | None = None ) -> None:
        if components is None:
            components =[]
        self.__components =list(components )
def __len__( self : int ) -> int:
return len(self.__components )
    def __str__( self ) -> str:
        return "(" + ",".join(map(str , self.__components ) ) + ")"
    def __add__( self , other : Vector ) -> Vector:
        size =len(self )
        if size == len(other ):
            result =[self.__components[i] + other.component(i ) for i in range(size )]
            return Vector(result )
        else:
            raise Exception('''must have the same size''' )
    def __sub__( self , other : Vector ) -> Vector:
        size =len(self )
        if size == len(other ):
            result =[self.__components[i] - other.component(i ) for i in range(size )]
            return Vector(result )
        else:  # error case
            raise Exception('''must have the same size''' )
@overload
def __mul__( self : List[str] , __lowercase : float ) -> Vector:
...
@overload
def __mul__( self : Any , __lowercase : Vector ) -> float:
...
    def __mul__( self , other : float | Vector ) -> float | Vector:
        if isinstance(other , (float, int) ):
            ans =[c * other for c in self.__components]
            return Vector(ans )
        elif isinstance(other , Vector ) and len(self ) == len(other ):
            size =len(self )
            prods =[self.__components[i] * other.component(i ) for i in range(size )]
            return sum(prods )
        else:  # error case
            raise Exception('''invalid operand!''' )
    def copy( self ) -> Vector:
        return Vector(self.__components )
    def component( self , i : int ) -> float:
        if isinstance(i , int ) and -len(self.__components ) <= i < len(self.__components ):
            return self.__components[i]
        else:
            raise Exception('''index out of range''' )
    def change_component( self , pos : int , value : float ) -> None:
        assert -len(self.__components ) <= pos < len(self.__components )
        self.__components[pos] =value
    def euclidean_length( self ) -> float:
        if len(self.__components ) == 0:
            raise Exception('''Vector is empty''' )
        squares =[c**2 for c in self.__components]
        return math.sqrt(sum(squares ) )
    def angle( self , other : Vector , deg : bool = False ) -> float:
        num =self * other
        den =self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den ) )
        else:
            return math.acos(num / den )
def zero_vector( dimension : int ) -> Vector:
    '''simple docstring'''
    assert isinstance(dimension , int )
    return Vector([0] * dimension )
def unit_basis_vector( dimension : int , pos : int ) -> Vector:
    '''simple docstring'''
    assert isinstance(dimension , int ) and (isinstance(pos , int ))
    ans =[0] * dimension
    ans[pos] =1
    return Vector(ans )
def axpy( scalar : float , x : Vector , y : Vector ) -> Vector:
    '''simple docstring'''
    assert (
        isinstance(x , Vector )
        and isinstance(y , Vector )
        and (isinstance(scalar , (int, float) ))
    )
    return x * scalar + y
def random_vector( n : int , a : int , b : int ) -> Vector:
    '''simple docstring'''
    random.seed(None )
    ans =[random.randint(a , b ) for _ in range(n )]
    return Vector(ans )
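# --- Illustrative usage sketch (added; not part of the original file) ---
def _vector_demo() -> None:
    x =Vector([1.0, 2.0, 2.0] )
    y =Vector([3.0, 0.0, 4.0] )
    assert str(x + y ) == '''(4.0,2.0,6.0)'''
    assert x * y == 11.0                 # dot product: 3 + 0 + 8
    assert x.euclidean_length() == 3.0   # sqrt(1 + 4 + 4)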
class Matrix:
    def __init__( self , matrix : list[list[float]] , w : int , h : int ) -> None:
        self.__matrix =matrix
        self.__width =w
        self.__height =h
    def __str__( self ) -> str:
        ans =''''''
        for i in range(self.__height ):
            ans += "|"
            for j in range(self.__width ):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j] ) + ","
                else:
                    ans += str(self.__matrix[i][j] ) + "|\n"
        return ans
    def __add__( self , other : Matrix ) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix =[]
            for i in range(self.__height ):
                row =[
                    self.__matrix[i][j] + other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception('''matrix must have the same dimension!''' )
    def __sub__( self , other : Matrix ) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix =[]
            for i in range(self.__height ):
                row =[
                    self.__matrix[i][j] - other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception('''matrices must have the same dimension!''' )
@overload
def __mul__( self : Dict , __lowercase : float ) -> Matrix:
...
@overload
def __mul__( self : str , __lowercase : Vector ) -> Vector:
...
    def __mul__( self , other : float | Vector ) -> Vector | Matrix:
        if isinstance(other , Vector ):  # matrix-vector
            if len(other ) == self.__width:
                ans =zero_vector(self.__height )
                for i in range(self.__height ):
                    prods =[
                        self.__matrix[i][j] * other.component(j )
                        for j in range(self.__width )
                    ]
                    ans.change_component(i , sum(prods ) )
                return ans
            else:
                raise Exception(
                    '''vector must have the same size as the '''
                    '''number of columns of the matrix!''' )
        elif isinstance(other , (int, float) ):  # matrix-scalar
            matrix =[
                [self.__matrix[i][j] * other for j in range(self.__width )]
                for i in range(self.__height )
            ]
            return Matrix(matrix , self.__width , self.__height )
        return None
    def height( self ) -> int:
        return self.__height
    def width( self ) -> int:
        return self.__width
    def component( self , x : int , y : int ) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception('''change_component: indices out of bounds''' )
    def change_component( self , x : int , y : int , value : float ) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] =value
        else:
            raise Exception('''change_component: indices out of bounds''' )
    def minor( self , x : int , y : int ) -> float:
        if self.__height != self.__width:
            raise Exception('''Matrix is not square''' )
        minor =self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor ) ):
            minor[i] =minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor , self.__width - 1 , self.__height - 1 ).determinant()
    def cofactor( self , x : int , y : int ) -> float:
        if self.__height != self.__width:
            raise Exception('''Matrix is not square''' )
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x , y )
        else:
            raise Exception('''Indices out of bounds''' )
    def determinant( self ) -> float:
        if self.__height != self.__width:
            raise Exception('''Matrix is not square''' )
        if self.__height < 1:
            raise Exception('''Matrix has no element''' )
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactors =[
                self.__matrix[0][y] * self.cofactor(0 , y ) for y in range(self.__width )
            ]
            return sum(cofactors )
def square_zero_matrix( n : int ) -> Matrix:
    '''simple docstring'''
    ans =[[0] * n for _ in range(n )]
    return Matrix(ans , n , n )
def random_matrix( width : int , height : int , a : int , b : int ) -> Matrix:
    '''simple docstring'''
    random.seed(None )
    matrix =[
        [random.randint(a , b ) for _ in range(width )] for _ in range(height )
    ]
    return Matrix(matrix , width , height )
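# --- Illustrative usage sketch (added; not part of the original file) ---
def _matrix_demo() -> None:
    m =Matrix([[1, 2], [3, 4]] , 2 , 2 )
    assert m.determinant() == -2            # 1*4 - 2*3
    v =m * Vector([1.0, 1.0] )              # matrix-vector product
    assert str(v ) == '''(3.0,7.0)'''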
| 701 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = SpeechTaTokenizer
snake_case_ = False
snake_case_ = True
def __magic_name__ ( self : int ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer =SpeechTaTokenizer(a_ )
        mask_token =AddedToken('''<mask>''' , lstrip=True , rstrip=False )
        tokenizer.mask_token =mask_token
        tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self , tokenizer ):
        input_text ='''this is a test'''
        output_text ='''this is a test'''
        return input_text, output_text
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ):
        input_text , output_text =self.get_input_output_texts(tokenizer )
        ids =tokenizer.encode(output_text , add_special_tokens=False )
        text =tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
def __magic_name__ ( self : Dict ) -> str:
        token ='''<pad>'''
        token_id =1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def __magic_name__ ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-4] , '''œ''' )
self.assertEqual(vocab_keys[-2] , '''<mask>''' )
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
self.assertEqual(len(__lowercase ) , 81 )
def __magic_name__ ( self : Dict ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def __magic_name__ ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE__ : str =self.get_tokenizers(do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Any =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
SCREAMING_SNAKE_CASE__ : int =['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.add_tokens(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , len(__lowercase ) )
self.assertEqual(__lowercase , all_size + len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__lowercase )
self.assertGreaterEqual(len(__lowercase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
SCREAMING_SNAKE_CASE__ : str ={'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
SCREAMING_SNAKE_CASE__ : int =tokenizer.add_special_tokens(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : int =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , len(__lowercase ) )
self.assertEqual(__lowercase , all_size_a + len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__lowercase )
self.assertGreaterEqual(len(__lowercase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def __magic_name__ ( self : Optional[Any] ) -> Any:
pass
def __magic_name__ ( self : List[str] ) -> List[Any]:
pass
def __magic_name__ ( self : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Dict =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.tokenize('''This is a test''' )
# fmt: off
self.assertListEqual(__lowercase , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(__lowercase )
# fmt: off
self.assertListEqual(__lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.convert_ids_to_tokens(__lowercase )
self.assertListEqual(
__lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
def __magic_name__ ( self : List[str] ) -> List[str]:
# Use custom sequence because this tokenizer does not handle numbers.
SCREAMING_SNAKE_CASE__ : List[Any] =[
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
SCREAMING_SNAKE_CASE__ : str ={
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__lowercase , )
| 665 | 0 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ['the', 'be', 'to', 'of', 'and', 'in', 'that', 'have']
def try_key( ciphertext : list[int], key : tuple[int, ...] ) -> str | None:
    '''simple docstring'''
    decoded: str =''''''
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key ), ciphertext ):
        decodedchar =cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
def filter_valid_chars( ciphertext : list[int] ) -> list[str]:
    '''simple docstring'''
    possibles =[]
    for key in product(LOWERCASE_INTS, repeat=3 ):
        encoded =try_key(ciphertext, key )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word( possibles : list[str], common_word : str ) -> list[str]:
    '''simple docstring'''
    return [possible for possible in possibles if common_word in possible.lower()]
def solution( filename : str = "p059_cipher.txt" ) -> int:
    '''simple docstring'''
    data: str
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    data =Path(__file__ ).parent.joinpath(filename ).read_text(encoding='''utf-8''' )
    ciphertext =[int(number ) for number in data.strip().split(''',''' )]
    possibles =filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles =filter_common_word(possibles, common_word )
        if len(possibles ) == 1:
            break
    decoded_text =possibles[0]
    return sum(ord(char ) for char in decoded_text )
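# --- Illustrative note (added; not part of the original file) ---
# The XOR key is three lowercase letters, so the search space is only
# 26**3 = 17_576 candidates; brute force with `try_key` over
# product(LOWERCASE_INTS, repeat=3) is instant, and the survivors are then
# filtered down by requiring common English words to appear in the plaintext.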
if __name__ == "__main__":
print(F'''{solution() = }''')
| 702 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , __lowercase : Optional[int] , __lowercase : str=13 , __lowercase : int=10 , __lowercase : List[Any]=3 , __lowercase : List[str]=2 , __lowercase : int=2 , __lowercase : Dict=True , __lowercase : Optional[Any]=True , __lowercase : int=32 , __lowercase : List[Any]=5 , __lowercase : Union[str, Any]=4 , __lowercase : Any=37 , __lowercase : Optional[Any]="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : Tuple=0.1 , __lowercase : Dict=10 , __lowercase : int=0.02 , __lowercase : str="divided_space_time" , __lowercase : Union[str, Any]=None , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : int =parent
SCREAMING_SNAKE_CASE__ : List[str] =batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_size
SCREAMING_SNAKE_CASE__ : List[Any] =num_channels
SCREAMING_SNAKE_CASE__ : int =patch_size
SCREAMING_SNAKE_CASE__ : Tuple =num_frames
SCREAMING_SNAKE_CASE__ : List[Any] =is_training
SCREAMING_SNAKE_CASE__ : List[str] =use_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size
SCREAMING_SNAKE_CASE__ : str =num_hidden_layers
SCREAMING_SNAKE_CASE__ : int =num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] =hidden_act
SCREAMING_SNAKE_CASE__ : Dict =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] =attention_type
SCREAMING_SNAKE_CASE__ : Union[str, Any] =initializer_range
SCREAMING_SNAKE_CASE__ : Any =scope
SCREAMING_SNAKE_CASE__ : int =num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
SCREAMING_SNAKE_CASE__ : List[str] =(image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : str =(num_frames) * self.num_patches_per_frame + 1
def __magic_name__ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[Any] =None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : int =self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : int ) -> Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
SCREAMING_SNAKE_CASE__ : List[Any] =self.num_labels
return config
def __magic_name__ ( self : int , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[Any] ) -> int:
SCREAMING_SNAKE_CASE__ : Tuple =TimesformerModel(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str =TimesformerForVideoClassification(__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase )
# verify the logits shape
SCREAMING_SNAKE_CASE__ : Tuple =torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __lowercase )
def __magic_name__ ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =config_and_inputs
SCREAMING_SNAKE_CASE__ : Any ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
snake_case_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
snake_case_ = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def __magic_name__ ( self : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Dict =TimesformerModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] =ConfigTester(
self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 )
def __magic_name__ ( self : str , __lowercase : Dict , __lowercase : str , __lowercase : Optional[int]=False ) -> int:
SCREAMING_SNAKE_CASE__ : str =copy.deepcopy(__lowercase )
if return_labels:
if model_class in get_values(__lowercase ):
SCREAMING_SNAKE_CASE__ : Any =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
return inputs_dict
def __magic_name__ ( self : List[Any] ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='''TimeSformer does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ) -> Optional[int]:
pass
def __magic_name__ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str =model_class(__lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Any =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) )
def __magic_name__ ( self : Any ) -> Any:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase )
SCREAMING_SNAKE_CASE__ : str =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : List[str] =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Dict =['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def __magic_name__ ( self : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__lowercase )
@slow
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> List[str]:
if not self.has_attentions:
pass
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] =True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.seq_length
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.num_frames
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
SCREAMING_SNAKE_CASE__ : str =False
SCREAMING_SNAKE_CASE__ : Tuple =True
SCREAMING_SNAKE_CASE__ : Dict =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE__ : List[Any] =True
SCREAMING_SNAKE_CASE__ : List[str] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE__ : Optional[int] =True
SCREAMING_SNAKE_CASE__ : Union[str, Any] =True
SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) )
self.assertEqual(out_len + 1 , len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : List[str] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def __magic_name__ ( self : Tuple ) -> List[Any]:
def check_hidden_states_output(__lowercase : Tuple , __lowercase : Dict , __lowercase : Optional[int] ):
SCREAMING_SNAKE_CASE__ : List[Any] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : int =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__lowercase ) , __lowercase )
SCREAMING_SNAKE_CASE__ : int =self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Tuple =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : List[str] =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''', filename='''eating_spaghetti.npy''', repo_type='''dataset''' )
SCREAMING_SNAKE_CASE__ : Any =np.load(UpperCamelCase__ )
return list(UpperCamelCase__ )
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : Any ) -> List[str]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : int =TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to(
__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =self.default_image_processor
SCREAMING_SNAKE_CASE__ : Tuple =prepare_video()
SCREAMING_SNAKE_CASE__ : Any =image_processor(video[:8] , return_tensors='''pt''' ).to(__lowercase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] =model(**__lowercase )
# verify the logits
SCREAMING_SNAKE_CASE__ : List[str] =torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , __lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 ) )
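# --- Illustrative note (added; not part of the original test file) ---
# The integration test above doubles as the inference recipe: sample 8 frames,
# preprocess, run one forward pass, and read the (1, 400) Kinetics-400 logits.
# Sketch (variable names are placeholders):
# processor = VideoMAEImageProcessor(image_mean=[0.5] * 3 , image_std=[0.5] * 3 )
# inputs = processor(frames[:8] , return_tensors='''pt''' )
# logits = model(**inputs ).logits   # torch.Size([1, 400])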
| 665 | 0 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ = 1_6
a_ = 3_2
def _a( UpperCamelCase__ : Tuple, UpperCamelCase__ : Optional[Any] = 1_6 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =AutoTokenizer.from_pretrained('''bert-base-cased''' )
SCREAMING_SNAKE_CASE__ : Any =load_dataset('''glue''', '''mrpc''' )
def tokenize_function(UpperCamelCase__ : Tuple ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=lowerCAmelCase_, max_length=lowerCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE__ : Optional[Any] =datasets.map(
lowerCAmelCase_, batched=lowerCAmelCase_, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE__ : Any =tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(UpperCamelCase__ : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE__ : Optional[Any] =1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE__ : Tuple =1_6
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE__ : Any =8
else:
SCREAMING_SNAKE_CASE__ : Dict =None
return tokenizer.pad(
lowerCAmelCase_, padding='''longest''', max_length=lowerCAmelCase_, pad_to_multiple_of=lowerCAmelCase_, return_tensors='''pt''', )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ : List[Any] =DataLoader(
tokenized_datasets['''train'''], shuffle=lowerCAmelCase_, collate_fn=lowerCAmelCase_, batch_size=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : str =DataLoader(
tokenized_datasets['''validation'''], shuffle=lowerCAmelCase_, collate_fn=lowerCAmelCase_, batch_size=lowerCAmelCase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a_ = mocked_dataloaders # noqa: F811
def training_function(config, args):
    '''simple docstring'''
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`' )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    # New Code #
    parser.add_argument(
        '--gradient_accumulation_steps', type=int, default=1, help='The number of minibatches to be ran before gradients are accumulated.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
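# For intuition, a rough manual equivalent of `accelerator.accumulate(model)`
# used above (a sketch, ignoring distributed-sync subtleties and leftover
# minibatches at the end of an epoch): scale each loss by the accumulation
# factor and only step the optimizer every N minibatches.
#
#   for step, batch in enumerate(train_dataloader):
#       loss = model(**batch).loss / gradient_accumulation_steps
#       accelerator.backward(loss)
#       if (step + 1) % gradient_accumulation_steps == 0:
#           optimizer.step()
#           lr_scheduler.step()
#           optimizer.zero_grad()
#
# The context manager additionally skips gradient synchronization on non-step
# iterations in distributed mode, which the naive loop above does not.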
| 703 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 5_1_2,
'bert-large-uncased': 5_1_2,
'bert-base-cased': 5_1_2,
'bert-large-cased': 5_1_2,
'bert-base-multilingual-uncased': 5_1_2,
'bert-base-multilingual-cased': 5_1_2,
'bert-base-chinese': 5_1_2,
'bert-base-german-cased': 5_1_2,
'bert-large-uncased-whole-word-masking': 5_1_2,
'bert-large-cased-whole-word-masking': 5_1_2,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-base-cased-finetuned-mrpc': 5_1_2,
'bert-base-german-dbmdz-cased': 5_1_2,
'bert-base-german-dbmdz-uncased': 5_1_2,
'TurkuNLP/bert-base-finnish-cased-v1': 5_1_2,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_1_2,
'wietsedv/bert-base-dutch-cased': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
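# A small illustrative check of the two helpers above (commented out so the
# module stays import-safe; assumes a BERT vocab is available locally or on the
# Hub). Sentence pairs are framed as [CLS] A [SEP] B [SEP], with segment id 0
# for the first sentence and 1 for the second:
#
#   tok = BertTokenizerFast.from_pretrained('bert-base-uncased')
#   ids_a = tok.convert_tokens_to_ids(['hello'])
#   ids_b = tok.convert_tokens_to_ids(['world'])
#   tok.build_inputs_with_special_tokens(ids_a, ids_b)
#   # -> [cls_id, hello_id, sep_id, world_id, sep_id]
#   tok.create_token_type_ids_from_sequences(ids_a, ids_b)
#   # -> [0, 0, 0, 1, 1]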
| 665 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_lilt'] = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
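# The pattern above defers heavy imports until an attribute is first accessed.
# A stripped-down sketch of the idea (not the actual _LazyModule implementation):
#
#   import importlib
#
#   class _Lazy:
#       def __init__(self, name, structure):
#           self._name, self._structure = name, structure
#       def __getattr__(self, attr):
#           for module, symbols in self._structure.items():
#               if attr in symbols:
#                   return getattr(importlib.import_module(f'.{module}', self._name), attr)
#           raise AttributeError(attr)
#
# so `from transformers.models.lilt import LiltModel` only pays the torch
# import cost when LiltModel is actually requested.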
| 704 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    '''simple docstring'''
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser('train', help='CLI tool to train a model on a task.')
        train_parser.add_argument(
            '--train_data', type=str, required=True, help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.', )
        train_parser.add_argument(
            '--column_label', type=int, default=0, help='Column of the dataset csv file with example labels.' )
        train_parser.add_argument(
            '--column_text', type=int, default=1, help='Column of the dataset csv file with example texts.' )
        train_parser.add_argument(
            '--column_id', type=int, default=2, help='Column of the dataset csv file with example ids.' )
        train_parser.add_argument(
            '--skip_first_row', action='store_true', help='Skip the first row of the csv file (headers).' )
        train_parser.add_argument('--validation_data', type=str, default='', help='path to validation dataset.')
        train_parser.add_argument(
            '--validation_split', type=float, default=0.1, help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.', )
        train_parser.add_argument('--output', type=str, default='./', help='path to saved the trained model.')
        train_parser.add_argument(
            '--task', type=str, default='text_classification', help='Task to train the model on.' )
        train_parser.add_argument(
            '--model', type=str, default='bert-base-uncased', help="Model's name or path to stored model." )
        train_parser.add_argument('--train_batch_size', type=int, default=32, help='Batch size for training.')
        train_parser.add_argument('--valid_batch_size', type=int, default=64, help='Batch size for validation.')
        train_parser.add_argument('--learning_rate', type=float, default=3e-5, help='Learning rate.')
        train_parser.add_argument('--adam_epsilon', type=float, default=1e-08, help='Epsilon for Adam optimizer.')
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger('transformers-cli/training')
        self.framework = 'tf' if is_tf_available() else 'torch'
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset, validation_data=self.valid_dataset, validation_split=self.validation_split, learning_rate=self.learning_rate, adam_epsilon=self.adam_epsilon, train_batch_size=self.train_batch_size, valid_batch_size=self.valid_batch_size, )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
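# Illustrative invocation (assuming a tab-separated csv with label/text/id
# columns and that the `transformers-cli` entry point is installed):
#
#   transformers-cli train \
#       --train_data ./train.csv \
#       --validation_split 0.1 \
#       --model bert-base-uncased \
#       --task text_classification \
#       --output ./trained_model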
| 665 | 0 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_megatron_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': MegatronBertModel,
            'fill-mask': MegatronBertForMaskedLM,
            'question-answering': MegatronBertForQuestionAnswering,
            'text-classification': MegatronBertForSequenceClassification,
            'text-generation': MegatronBertForCausalLM,
            'token-classification': MegatronBertForTokenClassification,
            'zero-shot': MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device )
                inputs_dict['next_sentence_label'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    '''simple docstring'''
    return torch.tensor(
        tok_lst, dtype=torch.long, device=torch_device, )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip('Model is not available.')
    def test_inference_no_head(self):
        directory = 'nvidia/megatron-bert-uncased-345m'
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ['MYDIR'], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = 'ii={} jj={} a={} b={}'.format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 705 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image']
    batch_params = [
        'image_embeds',
        'negative_image_embeds',
        'image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'strength',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            'in_channels': 4,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            'num_train_timesteps': 1000,
            'beta_schedule': 'linear',
            'beta_start': 0.00085,
            'beta_end': 0.012,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((256, 256))
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_img2img_frog.npy' )
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        prompt = 'A red cartoon frog, 4k'
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16 )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder', torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='cpu').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='', ).to_tuple()
        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type='np', )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
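# The two-stage flow exercised above, in outline (a sketch, not additional test
# code): the prior pipeline maps the text prompt to CLIP image embeddings, and
# the decoder pipeline denoises latents conditioned on those embeddings plus
# the init image.
#
#   image_emb, zero_image_emb = pipe_prior(prompt, ...).to_tuple()
#   frames = pipeline(image=init_image, image_embeds=image_emb,
#                     negative_image_embeds=zero_image_emb, strength=0.2, ...)
#
# `strength` controls how far the init image is noised before denoising starts;
# 0.2 keeps most of the original cat's structure in the frog output.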
| 665 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use YolosImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 706 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('T')
class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.')
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
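# For comparison, the standard-library route (a sketch, not part of the original
# module): collections.OrderedDict gives the same recency bookkeeping with
# move_to_end/popitem, without a separate deque + set pair.
#
#   from collections import OrderedDict
#
#   class OrderedDictLRU:
#       def __init__(self, capacity: int) -> None:
#           self.capacity = capacity
#           self.store: OrderedDict = OrderedDict()
#
#       def refer(self, key) -> None:
#           if key in self.store:
#               self.store.move_to_end(key, last=False)  # mark as most recent
#           else:
#               if len(self.store) == self.capacity:
#                   self.store.popitem(last=True)  # evict least recently used
#               self.store[key] = None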
| 665 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_owlvit'] = ['OwlViTFeatureExtractor']
    _import_structure['image_processing_owlvit'] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_owlvit'] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 707 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    '''simple docstring'''
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_points: list[int]) -> Callable[[int], int]:
    '''simple docstring'''
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size) )

    return interpolated_func
def question_function(variable: int) -> int:
    '''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**1_0
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    '''simple docstring'''
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
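# A hand-sized illustration of the fit/first-incorrect-term idea above (values
# are illustrative): fit the first k terms of u(n) = n**3 with a degree-(k-1)
# polynomial and look at its first wrong prediction.
#
#   cube = lambda n: n ** 3
#   op_2 = interpolate([cube(1), cube(2)])  # linear fit through (1, 1), (2, 8)
#   op_2(3)  # 15, while u(3) is 27, so this optimum polynomial contributes 15
#
# Summing those first incorrect terms over all optimum polynomials is exactly
# what solution() computes for the degree-10 generating function.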
| 665 | 0 |
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float, ) -> tuple:
    '''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
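# Quick usage sketch of the mass-action law n * p = n_i**2 that the function
# rearranges; pass 0 for the one unknown quantity (values are illustrative,
# not from a datasheet):
#
#   carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
#   # -> ('intrinsic_conc', 50.0)   since sqrt(25 * 100) = 50
#   carrier_concentration(electron_conc=0, hole_conc=1600, intrinsic_conc=200)
#   # -> ('electron_conc', 25.0)    since 200**2 / 1600 = 25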
| 708 |
'''simple docstring'''
def interpolation_search(sorted_collection, item):
    '''simple docstring'''
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    '''simple docstring'''
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1 )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right )
def __assert_sorted(collection):
    '''simple docstring'''
    if collection != sorted(collection):
        raise ValueError('Collection must be ascending sorted')
    return True
if __name__ == "__main__":
    import sys

    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('Sequence must be ascending sorted to apply interpolation search')
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f'{target} found at positions: {result}')
    else:
        print('Not found')
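# Worked example of the probe-point formula on the demo data above (a sketch):
# for collection = [10, 30, 40, 45, 50, 66, 77, 93] and target 67,
#
#   point = left + ((item - coll[left]) * (right - left)) // (coll[right] - coll[left])
#         = 0 + ((67 - 10) * 7) // (93 - 10) = 399 // 83 = 4
#
# so the first probe lands at index 4 (value 50), closer to the target than
# binary search's midpoint at index 3 (value 45), assuming the keys are
# roughly uniformly distributed.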
| 665 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 4, max_size=32 * 6, num_labels=4, mask_feature_size=32, ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
def __magic_name__ ( self : Tuple ) -> Tuple:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def __magic_name__ ( self : Tuple ) -> int:
        config , pixel_values , pixel_mask , mask_labels , class_labels =self.prepare_config_and_inputs()
        inputs_dict ={'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def __magic_name__ ( self : List[Any] , __lowercase : int , __lowercase : Dict ) -> int:
SCREAMING_SNAKE_CASE__ : List[Any] =output.encoder_hidden_states
SCREAMING_SNAKE_CASE__ : List[str] =output.pixel_decoder_hidden_states
SCREAMING_SNAKE_CASE__ : Optional[Any] =output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCAmelCase ) , config.decoder_config.decoder_layers )
def __magic_name__ ( self : Optional[Any] , __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : Any , __lowercase : List[Any]=False ) -> Tuple:
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] =MaskFormerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : int =model(pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Any =model(_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_lowerCAmelCase , _lowerCAmelCase )
def __magic_name__ ( self : Optional[Any] , __lowercase : int , __lowercase : Any , __lowercase : int , __lowercase : Any , __lowercase : int ) -> int:
SCREAMING_SNAKE_CASE__ : List[Any] =MaskFormerForInstanceSegmentation(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
def comm_check_on_output(__lowercase : List[Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any =model(pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict =model(_lowerCAmelCase )
comm_check_on_output(_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] =model(
pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase )
comm_check_on_output(_lowerCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
snake_case_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
snake_case_ = (
{'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def __magic_name__ ( self : Dict ) -> Dict:
SCREAMING_SNAKE_CASE__ : List[Any] =MaskFormerModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] =ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase )
def __magic_name__ ( self : Tuple ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __magic_name__ ( self : str ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_lowerCAmelCase , **_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
def __magic_name__ ( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_lowerCAmelCase )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def __magic_name__ ( self : List[str] ) -> Union[str, Any]:
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def __magic_name__ ( self : List[str] ) -> Dict:
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def __magic_name__ ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def __magic_name__ ( self : Union[str, Any] ) -> List[str]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __magic_name__ ( self : Any ) -> List[Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __magic_name__ ( self : Optional[Any] ) -> Any:
pass
def __magic_name__ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[str] =model_class(_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : str =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
@slow
def __magic_name__ ( self : Any ) -> Any:
for model_name in ["facebook/maskformer-swin-small-coco"]:
SCREAMING_SNAKE_CASE__ : Dict =MaskFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def __magic_name__ ( self : List[str] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict =(self.model_tester.min_size,) * 2
SCREAMING_SNAKE_CASE__ : str ={
'''pixel_values''': torch.randn((2, 3, *size) , device=_lowerCAmelCase ),
'''mask_labels''': torch.randn((2, 10, *size) , device=_lowerCAmelCase ),
'''class_labels''': torch.zeros(2 , 10 , device=_lowerCAmelCase ).long(),
}
SCREAMING_SNAKE_CASE__ : Tuple =MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict =model(**_lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
def __magic_name__ ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_lowerCAmelCase , **_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
def __magic_name__ ( self : Union[str, Any] ) -> Any:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(_lowerCAmelCase ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(**_lowerCAmelCase , output_attentions=_lowerCAmelCase )
self.assertTrue(outputs.attentions is not None )
def __magic_name__ ( self : Dict ) -> Dict:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
SCREAMING_SNAKE_CASE__ : str =self.all_model_classes[1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : Any =model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__ : str =model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase ).loss
loss.backward()
def __magic_name__ ( self : Union[str, Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int =self.all_model_classes[1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : Any =True
SCREAMING_SNAKE_CASE__ : str =True
SCREAMING_SNAKE_CASE__ : str =model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__ : Any =model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] =outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ : Optional[Any] =outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
SCREAMING_SNAKE_CASE__ : Optional[int] =outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ : Any =outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_lowerCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
a_ = 1E-4
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : Dict ) -> int:
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def __magic_name__ ( self : str ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[int] =MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] =self.default_image_processor
SCREAMING_SNAKE_CASE__ : int =prepare_img()
SCREAMING_SNAKE_CASE__ : Dict =image_processor(_lowerCAmelCase , return_tensors='''pt''' ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict =inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCAmelCase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Tuple =torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
def __magic_name__ ( self : int ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Dict =(
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_lowerCAmelCase )
.eval()
)
SCREAMING_SNAKE_CASE__ : Tuple =self.default_image_processor
SCREAMING_SNAKE_CASE__ : Dict =prepare_img()
SCREAMING_SNAKE_CASE__ : Any =image_processor(_lowerCAmelCase , return_tensors='''pt''' ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] =inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCAmelCase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] =model(**_lowerCAmelCase )
# masks_queries_logits
SCREAMING_SNAKE_CASE__ : int =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
SCREAMING_SNAKE_CASE__ : List[Any] =[
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
SCREAMING_SNAKE_CASE__ : int =torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
# class_queries_logits
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.tensor(
[
[1.6512e00, -5.2572e00, -3.3519e00],
[3.6169e-02, -5.9025e00, -2.9313e00],
[1.0766e-04, -7.7630e00, -5.1263e00],
] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[str] =(
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(_lowerCAmelCase )
.eval()
)
SCREAMING_SNAKE_CASE__ : Any =self.default_image_processor
SCREAMING_SNAKE_CASE__ : List[Any] =prepare_img()
SCREAMING_SNAKE_CASE__ : Dict =image_processor(_lowerCAmelCase , return_tensors='''pt''' ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple =inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCAmelCase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] =model(**_lowerCAmelCase )
# masks_queries_logits
SCREAMING_SNAKE_CASE__ : Dict =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
SCREAMING_SNAKE_CASE__ : List[str] =[[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
SCREAMING_SNAKE_CASE__ : List[str] =torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
# class_queries_logits
SCREAMING_SNAKE_CASE__ : Dict =outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE__ : str =torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
def __magic_name__ ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] =(
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_lowerCAmelCase )
.eval()
)
SCREAMING_SNAKE_CASE__ : Dict =self.default_image_processor
SCREAMING_SNAKE_CASE__ : List[str] =image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE__ : List[Any] =inputs['''pixel_values'''].to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict =[el.to(_lowerCAmelCase ) for el in inputs['''mask_labels''']]
SCREAMING_SNAKE_CASE__ : Tuple =[el.to(_lowerCAmelCase ) for el in inputs['''class_labels''']]
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] =model(**_lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
| 709 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
        def open( *args : int , **kwargs : Optional[Any] ) -> Optional[Any]:
            pass
@is_pipeline_test
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@require_torch
    def __magic_name__ ( self : Optional[int] ) -> Tuple:
        image_classifier =pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
        image =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        output =image_classifier(image , candidate_labels=['''a''', '''b''', '''c'''] )
        # The floating-point scores are so close that we run into floating-point error, so the order is
        # not guaranteed across Python and torch versions.
        self.assertIn(
            nested_simplify(output ) , [
                [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
                [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
            ] , )
        output =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'''score''': 0.333, '''label''': ANY(str )},
                    {'''score''': 0.333, '''label''': ANY(str )},
                    {'''score''': 0.333, '''label''': ANY(str )},
                ],
            ]
            * 5 , )
@require_tf
    def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]:
        image_classifier =pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
        image =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        output =image_classifier(image , candidate_labels=['''a''', '''b''', '''c'''] )
        self.assertEqual(
            nested_simplify(output ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
        output =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'''score''': 0.333, '''label''': ANY(str )},
                    {'''score''': 0.333, '''label''': ANY(str )},
                    {'''score''': 0.333, '''label''': ANY(str )},
                ],
            ]
            * 5 , )
@slow
@require_torch
    def __magic_name__ ( self : List[Any] ) -> Dict:
        image_classifier =pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
        # This is an image of 2 cats with remotes and no planes
        image =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        output =image_classifier(image , candidate_labels=['''cat''', '''plane''', '''remote'''] )
        self.assertEqual(
            nested_simplify(output ) , [
                {'''score''': 0.511, '''label''': '''remote'''},
                {'''score''': 0.485, '''label''': '''cat'''},
                {'''score''': 0.004, '''label''': '''plane'''},
            ] , )
        output =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'''score''': 0.511, '''label''': '''remote'''},
                    {'''score''': 0.485, '''label''': '''cat'''},
                    {'''score''': 0.004, '''label''': '''plane'''},
                ],
            ]
            * 5 , )
@slow
@require_tf
    def __magic_name__ ( self : Union[str, Any] ) -> int:
        image_classifier =pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
        # This is an image of 2 cats with remotes and no planes
        image =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        output =image_classifier(image , candidate_labels=['''cat''', '''plane''', '''remote'''] )
        self.assertEqual(
            nested_simplify(output ) , [
                {'''score''': 0.511, '''label''': '''remote'''},
                {'''score''': 0.485, '''label''': '''cat'''},
                {'''score''': 0.004, '''label''': '''plane'''},
            ] , )
        output =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'''score''': 0.511, '''label''': '''remote'''},
                    {'''score''': 0.485, '''label''': '''cat'''},
                    {'''score''': 0.004, '''label''': '''plane'''},
                ],
            ]
            * 5 , )
| 665 | 0 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
a_ = False
try:
a_ = _is_package_available('google.colab')
except ModuleNotFoundError:
pass
@input.register
class __SCREAMING_SNAKE_CASE :
    def __init__( self : int , prompt : Any = None , choices : Tuple = [] ) -> str:
        self.position =0
        self.choices =choices
        self.prompt =prompt
        if sys.platform == "win32":
            self.arrow_char ="""*"""
        else:
            self.arrow_char ="""➔ """
    def write_choice( self : Tuple , index : str , end : int = "" ) -> List[str]:
        if sys.platform != "win32":
            writeColor(self.choices[index] , 32 , end )
        else:
            forceWrite(self.choices[index] , end )
    def print_choice( self : List[str] , index : Optional[Any] ) -> Tuple:
        if index == self.position:
            forceWrite(F" {self.arrow_char} " )
            self.write_choice(index )
        else:
            forceWrite(F" {self.choices[index]}" )
        reset_cursor()
    def move_direction( self : int , direction : Dict , num_spaces : Dict = 1 ) -> str:
        old_position =self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position )
        move_cursor(num_spaces , direction.name )
        self.print_choice(self.position )
    @input.mark(KEYMAP['''up'''] )
    def move_up( self : Union[str, Any] ) -> List[Any]:
        self.move_direction(Direction.UP )
    @input.mark(KEYMAP['''down'''] )
    def move_down( self : Union[str, Any] ) -> Optional[int]:
        self.move_direction(Direction.DOWN )
    @input.mark(KEYMAP['''newline'''] )
    def select( self : int ) -> Optional[int]:
        move_cursor(len(self.choices ) - self.position , '''DOWN''' )
        return self.position
    @input.mark(KEYMAP['''interrupt'''] )
    def interrupt( self : Dict ) -> List[Any]:
        move_cursor(len(self.choices ) - self.position , '''DOWN''' )
        raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
    def select_row( self : Union[str, Any] ) -> List[Any]:
        index =int(chr(self.current_selection ) )
        movement =index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , movement )
            else:
                return
        else:
            return
    def run( self : List[str] , default_choice : List[str] = 0 ) -> Optional[Any]:
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , '''\n''' )
            if in_colab:
                forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' )
            else:
                forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' , '''\n''' )
        self.position =default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(i )
            forceWrite('''\n''' )
        move_cursor(len(self.choices ) - self.position , '''UP''' )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice =int(builtins.input() )
                    except ValueError:
                        choice =default_choice
                else:
                    choice =self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , '''UP''' )
                        clear_line()
                    self.write_choice(choice , '''\n''' )
                    return choice
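# Minimal usage sketch (informal; relies on the terminal helpers imported above):
#     menu = __SCREAMING_SNAKE_CASE('Pick one:', ['a', 'b', 'c'])
#     selected_index = menu.run(default_choice=0)
# In a Colab kernel the prompt falls back to plain numeric input, as handled in run().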
| 710 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case_ = JukeboxTokenizer
snake_case_ = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
def __magic_name__ ( self : Optional[int] ) -> str:
import torch
        tokenizer =JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
        tokens =tokenizer(**self.metas )['''input_ids''']
# fmt: off
        EXPECTED_OUTPUT =[
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
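        # Informal note: each expected tensor mixes metadata ids (artist/genres and
        # placeholder entries) with character-level lyric token ids; the test pins
        # the exact reference encoding rather than interpreting it.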
@require_torch
def __magic_name__ ( self : Any ) -> List[str]:
import torch
        tokenizer =JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
        tokens =tokenizer(**self.metas )['''input_ids''']
# fmt: off
        EXPECTED_OUTPUT =[
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 665 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue :
    def __init__( self : Union[str, Any] ) -> int:
        self.data : list[Any] =[]
        self.head : int =0
        self.tail : int =0
    def is_empty( self : Any ) -> List[Any]:
        return self.head == self.tail
    def push( self : List[str] , data : Any ) -> Dict:
        self.data.append(data )
        self.tail =self.tail + 1
    def pop( self : Union[str, Any] ) -> List[str]:
        ret =self.data[self.head]
        self.head =self.head + 1
        return ret
    def count( self : Dict ) -> str:
        return self.tail - self.head
    def print_queue( self : Union[str, Any] ) -> List[str]:
        print(self.data )
        print('''**************''' )
        print(self.data[self.head : self.tail] )
class MyNode :
    def __init__( self : str , data : List[str] ) -> Union[str, Any]:
        self.data =data
        self.left : MyNode | None =None
        self.right : MyNode | None =None
        self.height : int =1
    def get_data( self : Dict ) -> List[str]:
        return self.data
    def get_left( self : Optional[Any] ) -> Dict:
        return self.left
    def get_right( self : Tuple ) -> Union[str, Any]:
        return self.right
    def get_height( self : Tuple ) -> List[Any]:
        return self.height
    def set_data( self : Union[str, Any] , data : List[Any] ) -> Dict:
        self.data =data
    def set_left( self : Tuple , node : Union[str, Any] ) -> str:
        self.left =node
    def set_right( self : List[Any] , node : Tuple ) -> Any:
        self.right =node
    def set_height( self : Optional[Any] , height : str ) -> Optional[int]:
        self.height =height
def get_height( node : MyNode | None ):
    '''simple docstring'''
    if node is None:
        return 0
    return node.get_height()
def my_max( a : int, b : int ):
    '''simple docstring'''
    if a > b:
        return a
    return b
def right_rotation( node : MyNode ):
    '''simple docstring'''
    print('''right rotation node:''', node.get_data() )
    ret =node.get_left()
    assert ret is not None
    node.set_left(ret.get_right() )
    ret.set_right(node )
    h1 =my_max(get_height(node.get_right() ), get_height(node.get_left() ) ) + 1
    node.set_height(h1 )
    h2 =my_max(get_height(ret.get_right() ), get_height(ret.get_left() ) ) + 1
    ret.set_height(h2 )
    return ret
def left_rotation( node : MyNode ):
    '''simple docstring'''
    print('''left rotation node:''', node.get_data() )
    ret =node.get_right()
    assert ret is not None
    node.set_right(ret.get_left() )
    ret.set_left(node )
    h1 =my_max(get_height(node.get_right() ), get_height(node.get_left() ) ) + 1
    node.set_height(h1 )
    h2 =my_max(get_height(ret.get_right() ), get_height(ret.get_left() ) ) + 1
    ret.set_height(h2 )
    return ret
def lr_rotation( node : MyNode ):
    '''simple docstring'''
    left_child =node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child ) )
    return right_rotation(node )
def rl_rotation( node : MyNode ):
    '''simple docstring'''
    right_child =node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child ) )
    return left_rotation(node )
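# Informal cheat sheet: an LL imbalance is fixed by a single right_rotation and an
# RR imbalance by a single left_rotation; lr_rotation and rl_rotation first rotate
# the child so the remaining imbalance reduces to one of those single-rotation cases.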
def insert_node( node : MyNode | None, data : Any ):
    '''simple docstring'''
    if node is None:
        return MyNode(data )
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data ) )
        if (
            get_height(node.get_left() ) - get_height(node.get_right() ) == 2
        ):  # an unbalance detected
            left_child =node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node =right_rotation(node )
            else:
                node =lr_rotation(node )
    else:
        node.set_right(insert_node(node.get_right(), data ) )
        if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
            right_child =node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node =rl_rotation(node )
            else:
                node =left_rotation(node )
    h =my_max(get_height(node.get_right() ), get_height(node.get_left() ) ) + 1
    node.set_height(h )
    return node
def get_right_most( root : MyNode ):
    '''simple docstring'''
    while True:
        right_child =root.get_right()
        if right_child is None:
            break
        root =right_child
    return root.get_data()
def get_left_most( root : MyNode ):
    '''simple docstring'''
    while True:
        left_child =root.get_left()
        if left_child is None:
            break
        root =left_child
    return root.get_data()
def del_node( root : MyNode, data : Any ):
    '''simple docstring'''
    left_child =root.get_left()
    right_child =root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data =get_left_most(right_child )
            root.set_data(temp_data )
            root.set_right(del_node(right_child, temp_data ) )
        elif left_child is not None:
            root =left_child
        elif right_child is not None:
            root =right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print('''No such data''' )
            return root
        else:
            root.set_left(del_node(left_child, data ) )
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data ) )
    if get_height(right_child ) - get_height(left_child ) == 2:
        assert right_child is not None
        if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
            root =left_rotation(root )
        else:
            root =rl_rotation(root )
    elif get_height(right_child ) - get_height(left_child ) == -2:
        assert left_child is not None
        if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
            root =right_rotation(root )
        else:
            root =lr_rotation(root )
    h =my_max(get_height(root.get_right() ), get_height(root.get_left() ) ) + 1
    root.set_height(h )
    return root
class AVLtree :
    def __init__( self : int ) -> Dict:
        self.root : MyNode | None =None
    def get_height( self : Optional[int] ) -> Tuple:
        return get_height(self.root )
    def insert( self : Any , data : int ) -> List[str]:
        print('''insert:''' + str(data ) )
        self.root =insert_node(self.root , data )
    def del_node( self : Optional[int] , data : Tuple ) -> str:
        print('''delete:''' + str(data ) )
        if self.root is None:
            print('''Tree is empty!''' )
            return
        self.root =del_node(self.root , data )
    def __str__( self : int , ) -> str: # a level traversal gives a more intuitive look at the tree
        output =''''''
        q =MyQueue()
        q.push(self.root )
        layer =self.get_height()
        if layer == 0:
            return output
        cnt =0
        while not q.is_empty():
            node =q.pop()
            space =''' ''' * int(math.pow(2 , layer - 1 ) )
            output += space
            if node is None:
                output += "*"
                q.push(None )
                q.push(None )
            else:
                output += str(node.get_data() )
                q.push(node.get_left() )
                q.push(node.get_right() )
            output += space
            cnt =cnt + 1
            for i in range(1_00 ):
                if cnt == math.pow(2 , i ) - 1:
                    layer =layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test( ):
    '''simple docstring'''
    import doctest
    doctest.testmod()
if __name__ == "__main__":
    _test()
t = AVLtree()
lst = list(range(1_0))
random.shuffle(lst)
for i in lst:
    t.insert(i)
    print(str(t))
random.shuffle(lst)
for i in lst:
    t.del_node(i)
    print(str(t))
| 711 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    snake_case_ = """gpt_neox"""
    def __init__( self : List[Any] , vocab_size : Union[str, Any]=5_04_32 , hidden_size : int=61_44 , num_hidden_layers : Tuple=44 , num_attention_heads : List[str]=64 , intermediate_size : str=2_45_76 , hidden_act : Dict="gelu" , rotary_pct : Tuple=0.25 , rotary_emb_base : Tuple=1_00_00 , attention_dropout : Tuple=0.0 , hidden_dropout : str=0.0 , classifier_dropout : List[Any]=0.1 , max_position_embeddings : Dict=20_48 , initializer_range : Any=0.02 , layer_norm_eps : Dict=1e-5 , use_cache : List[Any]=True , bos_token_id : str=0 , eos_token_id : Optional[Any]=2 , tie_word_embeddings : Tuple=False , use_parallel_residual : List[Any]=True , rope_scaling : Optional[Any]=None , **kwargs : Any ) -> Dict:
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size =vocab_size
        self.max_position_embeddings =max_position_embeddings
        self.hidden_size =hidden_size
        self.num_hidden_layers =num_hidden_layers
        self.num_attention_heads =num_attention_heads
        self.intermediate_size =intermediate_size
        self.hidden_act =hidden_act
        self.rotary_pct =rotary_pct
        self.rotary_emb_base =rotary_emb_base
        self.attention_dropout =attention_dropout
        self.hidden_dropout =hidden_dropout
        self.classifier_dropout =classifier_dropout
        self.initializer_range =initializer_range
        self.layer_norm_eps =layer_norm_eps
        self.use_cache =use_cache
        self.tie_word_embeddings =tie_word_embeddings
        self.use_parallel_residual =use_parallel_residual
        self.rope_scaling =rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
    def _rope_scaling_validation( self : Optional[Any] ) -> Optional[Any]:
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                F"got {self.rope_scaling}" )
        rope_scaling_type =self.rope_scaling.get('''type''' , None )
        rope_scaling_factor =self.rope_scaling.get('''factor''' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
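# Minimal usage sketch (informal; the keyword values are illustrative only):
#     config = __SCREAMING_SNAKE_CASE(rope_scaling={'type': 'linear', 'factor': 2.0})
#     config.num_attention_heads  # 64 by default; it must divide hidden_size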
| 665 | 0 |
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : int , __lowercase : Any , __lowercase : Any=2 , __lowercase : int=56 , __lowercase : Any=True , __lowercase : Any=True , __lowercase : int=True , __lowercase : Any=True , __lowercase : Tuple=99 , __lowercase : Any=32 , __lowercase : List[str]=2 , __lowercase : Union[str, Any]=2 , __lowercase : int=7 , __lowercase : List[str]="gelu_new" , __lowercase : int=0.1 , __lowercase : Dict=0.1 , __lowercase : List[Any]=5_12 , __lowercase : List[Any]=16 , __lowercase : Any=2 , __lowercase : Tuple=0.02 , __lowercase : Union[str, Any]=4 , __lowercase : Tuple="block_sparse" , __lowercase : Dict=True , __lowercase : List[Any]=False , __lowercase : Union[str, Any]=2 , __lowercase : int=3 , ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Dict =parent
SCREAMING_SNAKE_CASE__ : int =batch_size
SCREAMING_SNAKE_CASE__ : Optional[int] =seq_length
SCREAMING_SNAKE_CASE__ : Union[str, Any] =is_training
SCREAMING_SNAKE_CASE__ : Dict =use_attention_mask
SCREAMING_SNAKE_CASE__ : str =use_token_type_ids
SCREAMING_SNAKE_CASE__ : Optional[Any] =use_labels
SCREAMING_SNAKE_CASE__ : Dict =vocab_size
SCREAMING_SNAKE_CASE__ : Dict =hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =num_hidden_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] =num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size
SCREAMING_SNAKE_CASE__ : int =hidden_act
SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple =max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[Any] =type_vocab_size
SCREAMING_SNAKE_CASE__ : int =type_sequence_label_size
SCREAMING_SNAKE_CASE__ : str =initializer_range
SCREAMING_SNAKE_CASE__ : Union[str, Any] =num_choices
SCREAMING_SNAKE_CASE__ : Optional[int] =rescale_embeddings
SCREAMING_SNAKE_CASE__ : List[Any] =attention_type
SCREAMING_SNAKE_CASE__ : List[str] =use_bias
SCREAMING_SNAKE_CASE__ : Optional[int] =block_size
SCREAMING_SNAKE_CASE__ : Tuple =num_random_blocks
def __magic_name__ ( self : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE__ : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE__ : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : List[Any] =None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] =BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def __magic_name__ ( self : Optional[int] ) -> List[str]:
        config_and_inputs =self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask =config_and_inputs
        inputs_dict ={
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': attention_mask,
        }
return config, inputs_dict
@require_flax
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , unittest.TestCase ):
snake_case_ = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
snake_case_ = False
snake_case_ = False
def __magic_name__ ( self : Any ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Tuple =FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __magic_name__ ( self : Dict ) -> Optional[int]:
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __magic_name__ ( self : Optional[int] ) -> str:
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __magic_name__ ( self : int ) -> Optional[Any]:
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __magic_name__ ( self : int ) -> Dict:
super().test_hidden_states_output()
@slow
def __magic_name__ ( self : List[str] ) -> int:
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : int =model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
self.assertIsNotNone(_lowerCamelCase )
def __magic_name__ ( self : Optional[Any] ) -> List[str]:
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __magic_name__ ( self : Optional[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE__ : Any =self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : int =model_class(_lowerCamelCase )
@jax.jit
            def model_jitted(input_ids : int , attention_mask : Any=None , **kwargs : Tuple ):
                return model(input_ids=input_ids , attention_mask=attention_mask , **kwargs )
with self.subTest('''JIT Enabled''' ):
SCREAMING_SNAKE_CASE__ : str =model_jitted(**_lowerCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE__ : List[str] =model_jitted(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
for jitted_output, output in zip(_lowerCamelCase , _lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
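                # Informal note: only output shapes are compared here; exact values
                # are not asserted, since jitted and eager execution may differ at
                # floating-point rounding level.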
def __magic_name__ ( self : Dict , __lowercase : Any , __lowercase : Dict , __lowercase : Dict , __lowercase : List[str]=1e-5 , __lowercase : List[str]="outputs" , __lowercase : Dict=None ) -> Optional[int]:
if name.startswith('''outputs.attentions''' ):
return
else:
super().check_pt_flax_outputs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
| 712 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
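# Informal note: with _LazyModule, an import such as `from <this package> import
# DebertaModel` defers the heavy torch import until the attribute is first
# accessed, and the try/except blocks above silently drop names whose optional
# backend is not installed.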
| 665 | 0 |
'''simple docstring'''
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    '''simple docstring'''
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    '''simple docstring'''
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    '''simple docstring'''
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    '''simple docstring'''
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    '''simple docstring'''
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    '''simple docstring'''
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    '''simple docstring'''
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    '''simple docstring'''
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 90 counterclockwise:\n')
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 180:\n')
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 270 counterclockwise:\n')
    print_matrix(rotate_270(matrix))
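    # Illustrative sanity checks (not part of the original demo):
    assert transpose([[1, 2], [3, 4]]) == [[1, 3], [2, 4]]
    assert rotate_90([[1, 2], [3, 4]]) == [[2, 4], [1, 3]]
    assert rotate_180([[1, 2], [3, 4]]) == [[4, 3], [2, 1]]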
| 713 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    '''simple docstring'''
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError('Iterations must be at least 1')

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    '''simple docstring'''
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant')

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
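    # Illustrative example (not part of the original tests): solve a strictly
    # diagonally dominant 2x2 system; the iterates approach the exact solution [1, 1].
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[5.0], [4.0]])
    print(jacobi_iteration_method(coefficient, constant, [0, 0], 25))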
| 665 | 0 |
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    def put(self, value):
        '''simple docstring'''
        raise NotImplementedError()

    def end(self):
        '''simple docstring'''
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError('TextStreamer only supports batch size 1')
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith('\n'):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(' ') + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ''

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        print(text, flush=True, end='' if not stream_end else None)

    def _is_chinese_char(self, cp):
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
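# Typical (hypothetical) usage of TextIteratorStreamer with `generate` on a
# separate thread; model/tokenizer/inputs are placeholders:
#
#   from threading import Thread
#   streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#   thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer))
#   thread.start()
#   for new_text in streamer:   # yields decoded chunks until generation ends
#       print(new_text, end='')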
| 714 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    '''simple docstring'''
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata['model_config'])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location='cpu')['module']

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab['[MASK2]'] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken('<ent>', lstrip=False, rstrip=False)
    entity_token_2 = AddedToken('<ent2>', lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, 'tokenizer_config.json'), 'r') as f:
        tokenizer_config = json.load(f)
    tokenizer_config['tokenizer_class'] = 'MLukeTokenizer'
    with open(os.path.join(pytorch_dump_folder_path, 'tokenizer_config.json'), 'w') as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names['entity_vocab_file']), 'w') as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(['@'])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(['#'])[0]

    word_emb = state_dict['embeddings.word_embeddings.weight']
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict['embeddings.word_embeddings.weight'] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + 'w2e_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2w_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2e_' + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['entity_embeddings.entity_embeddings.weight']
    entity_mask_emb = entity_emb[entity_vocab['[MASK]']].unsqueeze(0)
    state_dict['entity_embeddings.entity_embeddings.weight'] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict['entity_predictions.bias']
    entity_mask_bias = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0)
    state_dict['entity_predictions.bias'] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop('entity_predictions.decoder.weight')
    state_dict.pop('lm_head.decoder.weight')
    state_dict.pop('lm_head.decoder.bias')
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith('lm_head') or key.startswith('entity_predictions')):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task='entity_classification')

    text = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors='pt')

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}")
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}")
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = 'Tokyo is the capital of <mask>.'
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors='pt')

    outputs = model(**encoding)

    input_ids = encoding['input_ids'][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>'))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith('en:')][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print('Saving PyTorch model to {}'.format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    '''simple docstring'''
    SPECIAL_TOKENS = ['[MASK]', '[PAD]', '[UNK]']

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry['id']
        for entity_name, language in entry['entities']:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id

    return new_mapping
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
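# Hypothetical post-conversion smoke test (paths are placeholders):
#   tokenizer = MLukeTokenizer.from_pretrained("path/to/pytorch_dump_folder")
#   model = LukeForMaskedLM.from_pretrained("path/to/pytorch_dump_folder")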
| 665 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mluke'] = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 715 |
'''simple docstring'''
def abbr(a: str, b: str) -> bool:
    '''simple docstring'''
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
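    # Illustrative checks (not from the original file): lowercase letters may be
    # capitalized or dropped, uppercase letters must match exactly.
    assert abbr("daBcd", "ABC") is True
    assert abbr("dBcd", "ABC") is False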
| 665 | 0 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : str =0
def __magic_name__ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[Any] =AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __magic_name__ ( self : Dict ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : int =Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =Path(__SCREAMING_SNAKE_CASE ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) )
SCREAMING_SNAKE_CASE__ : int =AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __magic_name__ ( self : Optional[Any] ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : int =Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE__ : Tuple =Path(__SCREAMING_SNAKE_CASE ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) )
SCREAMING_SNAKE_CASE__ : Optional[int] =AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __magic_name__ ( self : List[Any] ) -> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Optional[Any] =CLIPConfig()
# Create a dummy config file with image_proceesor_type
SCREAMING_SNAKE_CASE__ : Optional[Any] =Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE__ : Tuple =Path(__SCREAMING_SNAKE_CASE ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
SCREAMING_SNAKE_CASE__ : str =AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ).to_dict()
config_dict.pop('''image_processor_type''' )
SCREAMING_SNAKE_CASE__ : Dict =CLIPImageProcessor(**__SCREAMING_SNAKE_CASE )
# save in new folder
model_config.save_pretrained(__SCREAMING_SNAKE_CASE )
config.save_pretrained(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ : List[str] =AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
# make sure private variable is not incorrectly saved
SCREAMING_SNAKE_CASE__ : List[str] =json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __magic_name__ ( self : Optional[Any] ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[Any] =Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , )
SCREAMING_SNAKE_CASE__ : List[str] =AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __magic_name__ ( self : Any ) -> Optional[Any]:
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , '''clip-base is not a local folder and is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE__ : int =AutoImageProcessor.from_pretrained('''clip-base''' )
def __magic_name__ ( self : int ) -> Any:
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
SCREAMING_SNAKE_CASE__ : Dict =AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , revision='''aaaaaa''' )
def __magic_name__ ( self : int ) -> Dict:
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
SCREAMING_SNAKE_CASE__ : List[Any] =AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __magic_name__ ( self : Tuple ) -> Optional[Any]:
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ : int =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ : Optional[int] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ : List[str] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ : Dict =AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __magic_name__ ( self : Any ) -> str:
try:
AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE )
AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Optional[Any] =Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE__ : Dict =Path(__SCREAMING_SNAKE_CASE ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) )
SCREAMING_SNAKE_CASE__ : Any =CustomImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ : int =AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __magic_name__ ( self : Optional[int] ) -> int:
class __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
snake_case_ = True
try:
AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE )
AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE__ : Union[str, Any] =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE__ : Optional[Any] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE__ : int =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(__SCREAMING_SNAKE_CASE , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
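# Minimal sketch of the register-and-reload round trip these tests exercise
# (hypothetical; mirrors the CustomConfig/CustomImageProcessor test helpers above):
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#   with tempfile.TemporaryDirectory() as tmp_dir:
#       CustomImageProcessor().save_pretrained(tmp_dir)
#       reloaded = AutoImageProcessor.from_pretrained(tmp_dir)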
| 716 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , __lowercase : Optional[Any] , __lowercase : List[str]=13 , __lowercase : int=7 , __lowercase : Tuple=True , __lowercase : Optional[Any]=True , __lowercase : List[Any]=True , __lowercase : str=True , __lowercase : Tuple=99 , __lowercase : Union[str, Any]=64 , __lowercase : Tuple=32 , __lowercase : Optional[Any]=5 , __lowercase : Tuple=4 , __lowercase : Optional[int]=37 , __lowercase : int="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : List[Any]=0.1 , __lowercase : List[Any]=5_12 , __lowercase : int=16 , __lowercase : Optional[int]=2 , __lowercase : Tuple=0.02 , __lowercase : List[str]=3 , __lowercase : List[str]=4 , __lowercase : List[str]=None , ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =parent
SCREAMING_SNAKE_CASE__ : Any =batch_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =seq_length
SCREAMING_SNAKE_CASE__ : Dict =is_training
SCREAMING_SNAKE_CASE__ : List[Any] =use_input_mask
SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_token_type_ids
SCREAMING_SNAKE_CASE__ : List[Any] =use_labels
SCREAMING_SNAKE_CASE__ : int =vocab_size
SCREAMING_SNAKE_CASE__ : str =hidden_size
SCREAMING_SNAKE_CASE__ : Any =embedding_size
SCREAMING_SNAKE_CASE__ : Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE__ : str =num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : str =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Any =max_position_embeddings
SCREAMING_SNAKE_CASE__ : Optional[Any] =type_vocab_size
SCREAMING_SNAKE_CASE__ : Any =type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =initializer_range
SCREAMING_SNAKE_CASE__ : str =num_labels
SCREAMING_SNAKE_CASE__ : List[str] =num_choices
SCREAMING_SNAKE_CASE__ : List[str] =scope
def __magic_name__ ( self : str ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : int =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : int =None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =None
SCREAMING_SNAKE_CASE__ : Optional[Any] =None
SCREAMING_SNAKE_CASE__ : Optional[int] =None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Tuple =ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self : List[str] ) -> Any:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
def __magic_name__ ( self : Any , __lowercase : Tuple , __lowercase : Any , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : Dict , __lowercase : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertModel(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(__lowercase , token_type_ids=__lowercase )
SCREAMING_SNAKE_CASE__ : str =model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__ ( self : Dict , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : Any , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : str =MegatronBertForMaskedLM(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : List[str] , __lowercase : Tuple , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : str , __lowercase : Any , __lowercase : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =MegatronBertForNextSentencePrediction(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : List[str] , __lowercase : Optional[int] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[Any] , __lowercase : str , __lowercase : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForPreTraining(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , next_sentence_label=__lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : Optional[int] , __lowercase : Tuple , __lowercase : Optional[int] , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any , __lowercase : Any , __lowercase : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] =MegatronBertForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : int , __lowercase : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.num_labels
SCREAMING_SNAKE_CASE__ : Dict =MegatronBertForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : int ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Tuple =self.num_labels
SCREAMING_SNAKE_CASE__ : int =MegatronBertForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : Dict , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Any , __lowercase : List[Any] , __lowercase : int , __lowercase : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : int =self.num_choices
SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertForMultipleChoice(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[int] =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Tuple =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self : str ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
snake_case_ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = True
# test_resize_embeddings = False
snake_case_ = False
def __magic_name__ ( self : List[Any] , __lowercase : List[str] , __lowercase : Tuple , __lowercase : Tuple=False ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
if return_labels:
if model_class in get_values(__lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] =torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
return inputs_dict
def __magic_name__ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =MegatronBertModelTester(self )
SCREAMING_SNAKE_CASE__ : Tuple =ConfigTester(self , config_class=__lowercase , hidden_size=37 )
def __magic_name__ ( self : str ) -> Dict:
self.config_tester.run_common_tests()
def __magic_name__ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__lowercase )
def __magic_name__ ( self : List[str] ) -> Any:
SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__lowercase )
def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__lowercase )
def __magic_name__ ( self : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__lowercase )
def __magic_name__ ( self : Dict ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__lowercase )
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__lowercase )
def __magic_name__ ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__lowercase )
def _long_tensor(tok_lst):
    '''simple docstring'''
    return torch.tensor(
        tok_lst, dtype=torch.long, device=torch_device, )
TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
@unittest.skip('''Model is not available.''' )
def __magic_name__ ( self : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Any ='''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
SCREAMING_SNAKE_CASE__ : Optional[int] =os.path.join(os.environ['''MYDIR'''] , __lowercase )
SCREAMING_SNAKE_CASE__ : Any =MegatronBertModel.from_pretrained(__lowercase )
model.to(__lowercase )
model.half()
SCREAMING_SNAKE_CASE__ : Dict =_long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase )[0]
SCREAMING_SNAKE_CASE__ : Dict =torch.Size((1, 9, 10_24) )
self.assertEqual(output.shape , __lowercase )
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = 'ii={} jj={} a={} b={}'.format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 665 | 0 |
'''simple docstring'''
def aliquot_sum(input_num: int) -> int:
    '''simple docstring'''
    if not isinstance(input_num, int):
        raise ValueError('Input must be an integer')
    if input_num <= 0:
        raise ValueError('Input must be positive')
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
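    # Illustrative checks: 28 is a perfect number, so its aliquot sum equals itself.
    assert aliquot_sum(28) == 28
    assert aliquot_sum(12) == 16  # 1 + 2 + 3 + 4 + 6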
| 717 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class TestTPU(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
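# The command assembled above expands to something like (illustrative):
#   python <test_dir>/xla_spawn.py --num_cores 8 <accelerate>/test_utils/scripts/test_script.py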
| 665 | 0 |
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
a_ = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
a_ = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def init_clap(checkpoint_path, enable_fusion=False):
    '''simple docstring'''
    model, model_cfg = create_model(
        'HTSAT-tiny', 'roberta', checkpoint_path, precision='fp32', device='cuda:0' if torch.cuda.is_available() else 'cpu', enable_fusion=enable_fusion, fusion_type='aff_2d' if enable_fusion else None, )
    return model, model_cfg


def rename_state_dict(state_dict):
    '''simple docstring'''
    model_state_dict = {}

    sequential_layers_pattern = R'.*sequential.(\d+).*'
    text_projection_pattern = R'.*_projection.(\d+).*'

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    '''simple docstring'''
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
a_ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
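# Example invocation (illustrative; the script name and paths are placeholders):
#   python convert_clap_original_pytorch_to_hf.py \
#       --checkpoint_path /path/to/clap.pt --pytorch_dump_folder_path ./clap-hf --enable_fusion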
| 718 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = ShapEImgaImgPipeline
snake_case_ = ["""image"""]
snake_case_ = ["""image"""]
snake_case_ = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
snake_case_ = False
@property
def __magic_name__ ( self : List[Any] ) -> List[Any]:
return 32
@property
def __magic_name__ ( self : List[str] ) -> Optional[int]:
return 32
@property
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Dict ) -> Union[str, Any]:
return 8
@property
def __magic_name__ ( self : Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
SCREAMING_SNAKE_CASE__ : str =CLIPVisionModel(__lowercase )
return model
@property
def __magic_name__ ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ : int =CLIPImageProcessor(
crop_size=2_24 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , )
return image_processor
@property
def __magic_name__ ( self : List[str] ) -> Dict:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : str ={
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
SCREAMING_SNAKE_CASE__ : str =PriorTransformer(**__lowercase )
return model
@property
def __magic_name__ ( self : Tuple ) -> List[str]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE__ : str =ShapERenderer(**__lowercase )
return model
def __magic_name__ ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int =self.dummy_prior
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_encoder
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_processor
SCREAMING_SNAKE_CASE__ : Tuple =self.dummy_renderer
SCREAMING_SNAKE_CASE__ : int =HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=__lowercase , clip_sample=__lowercase , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE__ : Any ={
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __magic_name__ ( self : Any , __lowercase : List[str] , __lowercase : Any=0 ) -> Any:
SCREAMING_SNAKE_CASE__ : int =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
if str(__lowercase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ : List[str] =torch.manual_seed(__lowercase )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
SCREAMING_SNAKE_CASE__ : Any ={
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def __magic_name__ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ : int ='''cpu'''
SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : str =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Any =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =output.images[0]
SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : List[Any] =np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self : List[Any] ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __magic_name__ ( self : Optional[int] ) -> str:
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )
def __magic_name__ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =1
SCREAMING_SNAKE_CASE__ : List[str] =2
SCREAMING_SNAKE_CASE__ : Dict =self.get_dummy_inputs(__lowercase )
for key in inputs.keys():
if key in self.batch_params:
SCREAMING_SNAKE_CASE__ : Tuple =batch_size * [inputs[key]]
SCREAMING_SNAKE_CASE__ : List[Any] =pipe(**__lowercase , num_images_per_prompt=__lowercase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : Optional[Any] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : List[str] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
SCREAMING_SNAKE_CASE__ : Dict =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
SCREAMING_SNAKE_CASE__ : List[Any] =ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
SCREAMING_SNAKE_CASE__ : Tuple =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple =pipe(
__lowercase , generator=__lowercase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__lowercase , __lowercase )
| 665 | 0 |
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
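# Consumer-side sketch (illustrative, not part of this module): with torch installed the
# real classes above are exported, otherwise the dummy placeholders raise on first use.
#
#     from diffusers.schedulers import DDIMScheduler
#     scheduler = DDIMScheduler(num_train_timesteps=1_000)
#     scheduler.set_timesteps(50)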
| 719 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
snake_case_ = """gpt_bigcode"""
snake_case_ = ["""past_key_values"""]
snake_case_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Any , __lowercase : Any=5_02_57 , __lowercase : int=10_24 , __lowercase : List[str]=7_68 , __lowercase : Optional[int]=12 , __lowercase : Dict=12 , __lowercase : List[str]=None , __lowercase : int="gelu_pytorch_tanh" , __lowercase : Union[str, Any]=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Optional[Any]=1e-5 , __lowercase : List[str]=0.02 , __lowercase : Tuple=True , __lowercase : Optional[Any]=True , __lowercase : Union[str, Any]=5_02_56 , __lowercase : List[Any]=5_02_56 , __lowercase : Union[str, Any]=True , __lowercase : List[str]=True , __lowercase : Dict=True , **__lowercase : List[Any] , ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =n_positions
SCREAMING_SNAKE_CASE__ : Dict =n_embd
SCREAMING_SNAKE_CASE__ : Dict =n_layer
SCREAMING_SNAKE_CASE__ : Union[str, Any] =n_head
SCREAMING_SNAKE_CASE__ : List[str] =n_inner
SCREAMING_SNAKE_CASE__ : List[str] =activation_function
SCREAMING_SNAKE_CASE__ : List[Any] =resid_pdrop
SCREAMING_SNAKE_CASE__ : List[Any] =embd_pdrop
SCREAMING_SNAKE_CASE__ : List[str] =attn_pdrop
SCREAMING_SNAKE_CASE__ : Dict =layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : List[str] =initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] =scale_attn_weights
SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_cache
        SCREAMING_SNAKE_CASE__ : Dict =attention_softmax_in_fp32
        SCREAMING_SNAKE_CASE__ : int =scale_attention_softmax_in_fp32
SCREAMING_SNAKE_CASE__ : Dict =multi_query
SCREAMING_SNAKE_CASE__ : Optional[Any] =bos_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] =eos_token_id
super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
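# Usage sketch (the upstream name of the class above is GPTBigCodeConfig):
#
#     config = GPTBigCodeConfig()       # GPT-2-small-sized defaults from the signature above
#     assert config.hidden_size == 768  # resolved to n_embd via the attribute map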
| 665 | 0 |
'''simple docstring'''
def hamming(n_element: int):
    '''simple docstring'''
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError('''a should be a positive number''' )
        raise my_error
    hamming_list = [1]
    (i, j, k) = (0, 0, 0)
    index = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5 ) )
index += 1
return hamming_list
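# Quick sanity sketch (this implementation counts 1 as the first Hamming number):
#
#     >>> hamming(10)
#     [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]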
if __name__ == "__main__":
    n = input('Enter the last number (nth term) of the Hamming Number Series: ')
print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    hamming_numbers = hamming(int(n))
print('-----------------------------------------------------')
print(F'''The list with nth numbers is: {hamming_numbers}''')
print('-----------------------------------------------------')
| 720 |
'''simple docstring'''
class MaxFenwickTree:
    # Fenwick-style tree for point updates and range-maximum queries.
    # Names are reconstructed from the internal call sites (self.get_prev /
    # self.get_next / self.arr / self.tree); the update/query method names
    # themselves are an assumption.
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        # Assumes values only ever increase, the usual restriction for a
        # max-Fenwick tree without a rebuild step.
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value, self.tree[index])
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
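    # Illustrative smoke test (hypothetical values; `query`'s right bound is exclusive):
    fenwick = MaxFenwickTree(8)
    for position, value in enumerate([2, 1, 4, 3, 9, 5, 7, 6]):
        fenwick.update(position, value)
    print(fenwick.query(0, 8))  # 9 (maximum over the whole array)
    print(fenwick.query(5, 8))  # 7 (maximum over indices 5..7)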
| 665 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
snake_case_ = 'megatron-bert'
def __init__( self : Optional[Any] , __lowercase : List[str]=2_90_56 , __lowercase : List[Any]=10_24 , __lowercase : List[str]=24 , __lowercase : Union[str, Any]=16 , __lowercase : Tuple=40_96 , __lowercase : Optional[Any]="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : Dict=0.1 , __lowercase : str=5_12 , __lowercase : Optional[int]=2 , __lowercase : Optional[int]=0.02 , __lowercase : Any=1e-12 , __lowercase : Optional[int]=0 , __lowercase : int="absolute" , __lowercase : List[Any]=True , **__lowercase : Optional[Any] , ) -> Any:
        super().__init__(pad_token_id=__lowercase , **__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size
SCREAMING_SNAKE_CASE__ : Any =num_hidden_layers
SCREAMING_SNAKE_CASE__ : Dict =num_attention_heads
SCREAMING_SNAKE_CASE__ : Any =hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] =intermediate_size
SCREAMING_SNAKE_CASE__ : Dict =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : str =max_position_embeddings
SCREAMING_SNAKE_CASE__ : Optional[int] =type_vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] =layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[Any] =position_embedding_type
SCREAMING_SNAKE_CASE__ : List[str] =use_cache
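# Usage sketch (upstream name: MegatronBertConfig; the defaults in the signature above
# mirror the published 345M Megatron-BERT configuration):
#
#     config = MegatronBertConfig(hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16)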
| 721 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __SCREAMING_SNAKE_CASE ( DiffusionPipeline ):
snake_case_ = ["""vqvae"""]
    def __init__( self : int , __lowercase : AutoencoderKL , __lowercase : UNet2DConditionModel , __lowercase : Mel , __lowercase : Union[DDIMScheduler, DDPMScheduler] , ) -> int:
super().__init__()
self.register_modules(unet=__lowercase , scheduler=__lowercase , mel=__lowercase , vqvae=__lowercase )
    def get_default_steps( self : List[str] ) -> int:
        return 50 if isinstance(self.scheduler , DDIMScheduler ) else 10_00
@torch.no_grad()
def __call__( self : Dict , __lowercase : int = 1 , __lowercase : str = None , __lowercase : np.ndarray = None , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = None , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : float = 0 , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : torch.Tensor = None , __lowercase : torch.Tensor = None , __lowercase : Dict=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =steps or self.get_default_steps()
self.scheduler.set_timesteps(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
SCREAMING_SNAKE_CASE__ : Optional[int] =randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__lowercase , device=self.device , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =noise
SCREAMING_SNAKE_CASE__ : List[str] =None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Dict =self.mel.audio_slice_to_image(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
SCREAMING_SNAKE_CASE__ : int =(input_image / 2_55) * 2 - 1
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.vqvae.encode(torch.unsqueeze(__lowercase , 0 ) ).latent_dist.sample(
generator=__lowercase )[0]
SCREAMING_SNAKE_CASE__ : int =self.vqvae.config.scaling_factor * input_images
if start_step > 0:
SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , self.scheduler.timesteps[start_step - 1] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
SCREAMING_SNAKE_CASE__ : Optional[Any] =int(mask_start_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : Tuple =int(mask_end_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet , UNet2DConditionModel ):
SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase , __lowercase )['''sample''']
else:
SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase )['''sample''']
            if isinstance(self.scheduler , DDIMScheduler ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.scheduler.step(
model_output=__lowercase , timestep=__lowercase , sample=__lowercase , eta=__lowercase , generator=__lowercase , )['''prev_sample''']
else:
SCREAMING_SNAKE_CASE__ : Any =self.scheduler.step(
model_output=__lowercase , timestep=__lowercase , sample=__lowercase , generator=__lowercase , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] =mask[:, step, :, :mask_start]
if mask_end > 0:
SCREAMING_SNAKE_CASE__ : List[str] =mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
SCREAMING_SNAKE_CASE__ : str =1 / self.vqvae.config.scaling_factor * images
SCREAMING_SNAKE_CASE__ : int =self.vqvae.decode(__lowercase )['''sample''']
SCREAMING_SNAKE_CASE__ : List[str] =(images / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
SCREAMING_SNAKE_CASE__ : Dict =(images * 2_55).round().astype('''uint8''' )
SCREAMING_SNAKE_CASE__ : Any =list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ , mode='''RGB''' ).convert('''L''' ) for _ in images) )
SCREAMING_SNAKE_CASE__ : Optional[int] =[self.mel.image_to_audio(__lowercase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowercase ) )
@torch.no_grad()
    def encode( self : Optional[int] , __lowercase : List[Image.Image] , __lowercase : int = 50 ) -> np.ndarray:
        assert isinstance(self.scheduler , DDIMScheduler )
self.scheduler.set_timesteps(__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
SCREAMING_SNAKE_CASE__ : str =(sample / 2_55) * 2 - 1
SCREAMING_SNAKE_CASE__ : str =torch.Tensor(__lowercase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
SCREAMING_SNAKE_CASE__ : Tuple =t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
SCREAMING_SNAKE_CASE__ : Any =self.scheduler.alphas_cumprod[t]
SCREAMING_SNAKE_CASE__ : int =(
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
SCREAMING_SNAKE_CASE__ : int =1 - alpha_prod_t
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.unet(__lowercase , __lowercase )['''sample''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(1 - alpha_prod_t_prev) ** 0.5 * model_output
SCREAMING_SNAKE_CASE__ : str =(sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
SCREAMING_SNAKE_CASE__ : Any =sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
    def slerp( x0 : torch.Tensor , x1 : torch.Tensor , alpha : float ) -> torch.Tensor:
        theta = acos(torch.dot(torch.flatten(x0 ) , torch.flatten(x1 ) ) / torch.norm(x0 ) / torch.norm(x1 ) )
        return sin((1 - alpha) * theta ) * x0 / sin(theta ) + sin(alpha * theta ) * x1 / sin(theta )
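# Usage sketch (upstream entry point is AudioDiffusionPipeline; treat the exact
# checkpoint id as an assumption, it is a community model):
#
#     pipe = AudioDiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
#     output = pipe(batch_size=1, steps=50)
#     output.images[0].save("spectrogram.png")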
| 665 | 0 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = """Hello, World!"""
a_ = """en_XX"""
def _a( UpperCamelCase__ : str, UpperCamelCase__ : str, UpperCamelCase__ : bool ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =Path('''data_bin''' )
SCREAMING_SNAKE_CASE__ : Any =FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowerCamelCase ).parent ), checkpoint_file=Path(_lowerCamelCase ).name, _name='''xmod_base''', arch='''xmod_base''', task='''multilingual_masked_lm''', data_name_or_path=str(_lowerCamelCase ), bpe='''sentencepiece''', sentencepiece_model=str(Path(_lowerCamelCase ).parent / '''sentencepiece.bpe.model''' ), src_dict=str(data_dir / '''dict.txt''' ), )
xmod.eval() # disable dropout
print(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] =xmod.model.encoder.sentence_encoder
SCREAMING_SNAKE_CASE__ : Optional[int] =XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings, hidden_size=xmod.cfg.model.encoder_embed_dim, num_hidden_layers=xmod.cfg.model.encoder_layers, num_attention_heads=xmod.cfg.model.encoder_attention_heads, intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=5_1_4, type_vocab_size=1, layer_norm_eps=1e-5, pre_norm=xmod.cfg.model.encoder_normalize_before, adapter_reduction_factor=getattr(xmod.cfg.model, '''bottleneck''', 2 ), adapter_layer_norm=xmod.cfg.model.adapter_layer_norm, adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm, ln_before_adapter=xmod.cfg.model.ln_before_adapter, languages=xmod.cfg.model.languages, )
if classification_head:
SCREAMING_SNAKE_CASE__ : List[str] =xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''', _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =XmodForSequenceClassification(_lowerCamelCase ) if classification_head else XmodForMaskedLM(_lowerCamelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE__ : List[str] =xmod_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE__ : str =xmod_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE__ : Any =torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
SCREAMING_SNAKE_CASE__ : Optional[Any] =xmod_sent_encoder.layernorm_embedding.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] =xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE__ : Dict =model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE__ : List[str] =xmod_sent_encoder.layers[i]
# self attention
SCREAMING_SNAKE_CASE__ : List[str] =layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] =xmod_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE__ : List[str] =xmod_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE__ : int =xmod_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] =xmod_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE__ : str =xmod_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE__ : Tuple =xmod_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE__ : Optional[int] =layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
SCREAMING_SNAKE_CASE__ : List[str] =xmod_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] =xmod_layer.self_attn.out_proj.bias
SCREAMING_SNAKE_CASE__ : Any =xmod_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] =xmod_layer.self_attn_layer_norm.bias
# intermediate
SCREAMING_SNAKE_CASE__ : Optional[int] =layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
SCREAMING_SNAKE_CASE__ : Tuple =xmod_layer.fca.weight
SCREAMING_SNAKE_CASE__ : Tuple =xmod_layer.fca.bias
# output
SCREAMING_SNAKE_CASE__ : str =layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
SCREAMING_SNAKE_CASE__ : List[str] =xmod_layer.fca.weight
SCREAMING_SNAKE_CASE__ : str =xmod_layer.fca.bias
SCREAMING_SNAKE_CASE__ : Any =xmod_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE__ : Tuple =xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
SCREAMING_SNAKE_CASE__ : List[Any] =xmod_layer.adapter_layer_norm.weight
SCREAMING_SNAKE_CASE__ : List[Any] =xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
SCREAMING_SNAKE_CASE__ : Optional[Any] =bert_output.adapter_modules[lang_code]
SCREAMING_SNAKE_CASE__ : int =xmod_layer.adapter_modules[lang_code]
SCREAMING_SNAKE_CASE__ : List[str] =from_adapter.fca.weight
SCREAMING_SNAKE_CASE__ : List[str] =from_adapter.fca.bias
SCREAMING_SNAKE_CASE__ : int =from_adapter.fca.weight
SCREAMING_SNAKE_CASE__ : Optional[int] =from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
SCREAMING_SNAKE_CASE__ : List[Any] =xmod_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] =xmod_sent_encoder.layer_norm.bias
if classification_head:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =xmod.model.classification_heads['''mnli'''].dense.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] =xmod.model.classification_heads['''mnli'''].dense.bias
SCREAMING_SNAKE_CASE__ : int =xmod.model.classification_heads['''mnli'''].out_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] =xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE__ : int =xmod.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] =xmod.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE__ : Union[str, Any] =xmod.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE__ : int =xmod.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE__ : Union[str, Any] =xmod.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE__ : List[Any] =xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE__ : Optional[int] =xmod.encode(_lowerCamelCase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] =model(_lowerCamelCase )[0]
if classification_head:
SCREAMING_SNAKE_CASE__ : Optional[int] =xmod.model.classification_heads['''mnli'''](xmod.extract_features(_lowerCamelCase ) )
else:
SCREAMING_SNAKE_CASE__ : Any =xmod.model(_lowerCamelCase, lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape, their_output.shape )
SCREAMING_SNAKE_CASE__ : Any =torch.max(torch.abs(our_output - their_output ) ).item()
print(f"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 )
print('''Do both models output the same tensors?''', '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
Path(_lowerCamelCase ).mkdir(parents=_lowerCamelCase, exist_ok=_lowerCamelCase )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
a_ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 700 |
'''simple docstring'''
from math import isqrt
def calculate_prime_numbers(max_number: int):
    '''simple docstring'''
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2, max_number, i ):
                is_prime[j] = False
    return [i for i in range(2, max_number ) if is_prime[i]]
def solution(max_number: int = 10**8 ):
    '''simple docstring'''
    prime_numbers = calculate_prime_numbers(max_number // 2 )
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
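# Sanity sketch on a small bound: the semiprimes below 30 are
# 4, 6, 9, 10, 14, 15, 21, 22, 25, 26, so solution(30) == 10.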
if __name__ == "__main__":
print(F'''{solution() = }''')
| 665 | 0 |
'''simple docstring'''
def print_max_activities(start, finish):
    '''simple docstring'''
    n = len(finish )
    print('''The following activities are selected:''' )
    # The first activity is always selected
    i = 0
    print(i, end=''',''' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=''',''' )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
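    # Expected output for the sample data above: activities 0, 1, 3 and 4 ("0,1,3,4,").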
| 701 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase ):
    snake_case_ = SpeechT5Tokenizer
snake_case_ = False
snake_case_ = True
def __magic_name__ ( self : int ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(a_ )
        mask_token = AddedToken('''<mask>''' , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self : Dict , __lowercase : int ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] ='''this is a test'''
SCREAMING_SNAKE_CASE__ : int ='''this is a test'''
return input_text, output_text
def __magic_name__ ( self : List[Any] , __lowercase : int , __lowercase : Optional[Any]=False , __lowercase : Union[str, Any]=20 , __lowercase : Any=5 ) -> Any:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.get_input_output_texts(__lowercase )
SCREAMING_SNAKE_CASE__ : str =tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
SCREAMING_SNAKE_CASE__ : str =tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
return text, ids
def __magic_name__ ( self : Dict ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[int] ='''<pad>'''
SCREAMING_SNAKE_CASE__ : Optional[int] =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def __magic_name__ ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-4] , '''œ''' )
self.assertEqual(vocab_keys[-2] , '''<mask>''' )
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
self.assertEqual(len(__lowercase ) , 81 )
def __magic_name__ ( self : Dict ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def __magic_name__ ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE__ : str =self.get_tokenizers(do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Any =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
SCREAMING_SNAKE_CASE__ : int =['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.add_tokens(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , len(__lowercase ) )
self.assertEqual(__lowercase , all_size + len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__lowercase )
self.assertGreaterEqual(len(__lowercase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
SCREAMING_SNAKE_CASE__ : str ={'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
SCREAMING_SNAKE_CASE__ : int =tokenizer.add_special_tokens(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : int =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , len(__lowercase ) )
self.assertEqual(__lowercase , all_size_a + len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__lowercase )
self.assertGreaterEqual(len(__lowercase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def __magic_name__ ( self : Optional[Any] ) -> Any:
pass
def __magic_name__ ( self : List[str] ) -> List[Any]:
pass
def __magic_name__ ( self : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Dict =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.tokenize('''This is a test''' )
# fmt: off
self.assertListEqual(__lowercase , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(__lowercase )
# fmt: off
self.assertListEqual(__lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.convert_ids_to_tokens(__lowercase )
self.assertListEqual(
__lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
def __magic_name__ ( self : List[str] ) -> List[str]:
# Use custom sequence because this tokenizer does not handle numbers.
SCREAMING_SNAKE_CASE__ : List[Any] =[
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
SCREAMING_SNAKE_CASE__ : str ={
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__lowercase , )
| 665 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : Any ) -> Tuple:
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset )
        self.assertTrue(isinstance(dc.token_ids , list ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __magic_name__ ( self : str ) -> List[Any]:
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(cset )  # fails here: one sequence is a prefix of another
def __magic_name__ ( self : Dict ) -> List[Any]:
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset )
        stepped, completed, reset = dc.update(1 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
        stepped, completed, reset = dc.update(2 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
        stepped, completed, reset = dc.update(3 )
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __magic_name__ ( self : str ) -> Tuple:
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset )
SCREAMING_SNAKE_CASE__ : Dict =dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
SCREAMING_SNAKE_CASE__ : List[Any] =dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
SCREAMING_SNAKE_CASE__ : Dict =dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
SCREAMING_SNAKE_CASE__ : List[Any] =dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
SCREAMING_SNAKE_CASE__ : Tuple =dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
SCREAMING_SNAKE_CASE__ : str =dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
SCREAMING_SNAKE_CASE__ : str =dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
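# Standalone sketch of the same API outside unittest (token ids are illustrative):
#
#     dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
#     stepped, completed, reset = dc.update(1)  # feed a generated token id
#     dc.remaining()  # -> 2 tokens still needed to satisfy the constraint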
| 702 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester :
def __init__( self : Optional[int] , __lowercase : Optional[int] , __lowercase : str=13 , __lowercase : int=10 , __lowercase : List[Any]=3 , __lowercase : List[str]=2 , __lowercase : int=2 , __lowercase : Dict=True , __lowercase : Optional[Any]=True , __lowercase : int=32 , __lowercase : List[Any]=5 , __lowercase : Union[str, Any]=4 , __lowercase : Any=37 , __lowercase : Optional[Any]="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : Tuple=0.1 , __lowercase : Dict=10 , __lowercase : int=0.02 , __lowercase : str="divided_space_time" , __lowercase : Union[str, Any]=None , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : int =parent
SCREAMING_SNAKE_CASE__ : List[str] =batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_size
SCREAMING_SNAKE_CASE__ : List[Any] =num_channels
SCREAMING_SNAKE_CASE__ : int =patch_size
SCREAMING_SNAKE_CASE__ : Tuple =num_frames
SCREAMING_SNAKE_CASE__ : List[Any] =is_training
SCREAMING_SNAKE_CASE__ : List[str] =use_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size
SCREAMING_SNAKE_CASE__ : str =num_hidden_layers
SCREAMING_SNAKE_CASE__ : int =num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] =hidden_act
SCREAMING_SNAKE_CASE__ : Dict =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] =attention_type
SCREAMING_SNAKE_CASE__ : Union[str, Any] =initializer_range
SCREAMING_SNAKE_CASE__ : Any =scope
SCREAMING_SNAKE_CASE__ : int =num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
SCREAMING_SNAKE_CASE__ : List[str] =(image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : str =(num_frames) * self.num_patches_per_frame + 1
def __magic_name__ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[Any] =None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : int =self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : int ) -> Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
SCREAMING_SNAKE_CASE__ : List[Any] =self.num_labels
return config
def __magic_name__ ( self : int , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[Any] ) -> int:
SCREAMING_SNAKE_CASE__ : Tuple =TimesformerModel(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str =TimesformerForVideoClassification(__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase )
# verify the logits shape
SCREAMING_SNAKE_CASE__ : Tuple =torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __lowercase )
def __magic_name__ ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =config_and_inputs
SCREAMING_SNAKE_CASE__ : Any ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
snake_case_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
snake_case_ = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def __magic_name__ ( self : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Dict =TimesformerModelTester(self )
        SCREAMING_SNAKE_CASE__ : List[Any] =ConfigTester(
            self , config_class=TimesformerConfig , has_text_modality=False , hidden_size=37 )
def __magic_name__ ( self : str , __lowercase : Dict , __lowercase : str , __lowercase : Optional[int]=False ) -> int:
SCREAMING_SNAKE_CASE__ : str =copy.deepcopy(__lowercase )
if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ):
SCREAMING_SNAKE_CASE__ : Any =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
return inputs_dict
def __magic_name__ ( self : List[Any] ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='''TimeSformer does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ) -> Optional[int]:
pass
def __magic_name__ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str =model_class(__lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Any =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) )
def __magic_name__ ( self : Any ) -> Any:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase )
SCREAMING_SNAKE_CASE__ : str =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : List[str] =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Dict =['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def __magic_name__ ( self : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__lowercase )
@slow
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> List[str]:
if not self.has_attentions:
pass
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] =True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.seq_length
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.num_frames
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
SCREAMING_SNAKE_CASE__ : str =False
SCREAMING_SNAKE_CASE__ : Tuple =True
SCREAMING_SNAKE_CASE__ : Dict =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE__ : List[Any] =True
SCREAMING_SNAKE_CASE__ : List[str] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE__ : Optional[int] =True
SCREAMING_SNAKE_CASE__ : Union[str, Any] =True
SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) )
self.assertEqual(out_len + 1 , len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : List[str] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def __magic_name__ ( self : Tuple ) -> List[Any]:
def check_hidden_states_output(__lowercase : Tuple , __lowercase : Dict , __lowercase : Optional[int] ):
SCREAMING_SNAKE_CASE__ : List[Any] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : int =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__lowercase ) , __lowercase )
SCREAMING_SNAKE_CASE__ : int =self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Tuple =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : List[str] =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''', filename='''eating_spaghetti.npy''', repo_type='''dataset''' )
SCREAMING_SNAKE_CASE__ : Any =np.load(UpperCamelCase__ )
return list(UpperCamelCase__ )
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : Any ) -> List[str]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : int =TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to(
__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =self.default_image_processor
SCREAMING_SNAKE_CASE__ : Tuple =prepare_video()
SCREAMING_SNAKE_CASE__ : Any =image_processor(video[:8] , return_tensors='''pt''' ).to(__lowercase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] =model(**__lowercase )
# verify the logits
SCREAMING_SNAKE_CASE__ : List[str] =torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , __lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 ) )
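# Hedged sketch (not part of the original test): restates the attention shape
# asserted earlier. TimeSformer's divided space-time attention folds the frame
# axis into the batch axis, and each frame contributes its patches plus one
# CLS token. The helper name below is illustrative, not from the library.
def _expected_attention_shape(batch_size: int, num_frames: int, num_heads: int, patches_per_frame: int) -> tuple:
    return (batch_size * num_frames, num_heads, patches_per_frame + 1, patches_per_frame + 1)

assert _expected_attention_shape(1, 2, 4, 9) == (2, 4, 10, 10)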
| 665 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a_ = {'configuration_swin': ['SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwinConfig', 'SwinOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwinForImageClassification',
'SwinForMaskedImageModeling',
'SwinModel',
'SwinPreTrainedModel',
'SwinBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSwinForImageClassification',
'TFSwinForMaskedImageModeling',
'TFSwinModel',
'TFSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
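# Hedged sketch of the lazy-import idea used above: nothing heavy is imported
# until an attribute is first accessed. Only the stdlib is assumed; the real
# `_LazyModule` additionally handles submodule maps and `TYPE_CHECKING` re-exports.
import importlib

class _TinyLazyModule:
    def __init__(self, name: str) -> None:
        self._name = name
        self._module = None

    def __getattr__(self, attr: str):
        if self._module is None:
            self._module = importlib.import_module(self._name)  # deferred import happens here
        return getattr(self._module, attr)

_lazy_json = _TinyLazyModule('json')
assert _lazy_json.dumps({'a': 1}) == '{"a": 1}'  # first access triggers the import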
| 703 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
a_ = {
'bert-base-uncased': 5_1_2,
'bert-large-uncased': 5_1_2,
'bert-base-cased': 5_1_2,
'bert-large-cased': 5_1_2,
'bert-base-multilingual-uncased': 5_1_2,
'bert-base-multilingual-cased': 5_1_2,
'bert-base-chinese': 5_1_2,
'bert-base-german-cased': 5_1_2,
'bert-large-uncased-whole-word-masking': 5_1_2,
'bert-large-cased-whole-word-masking': 5_1_2,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-base-cased-finetuned-mrpc': 5_1_2,
'bert-base-german-dbmdz-cased': 5_1_2,
'bert-base-german-dbmdz-uncased': 5_1_2,
'TurkuNLP/bert-base-finnish-cased-v1': 5_1_2,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_1_2,
'wietsedv/bert-base-dutch-cased': 5_1_2,
}
a_ = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_INIT_CONFIGURATION
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = BertTokenizer
def __init__( self : int , __lowercase : Union[str, Any]=None , __lowercase : Tuple=None , __lowercase : str=True , __lowercase : Optional[Any]="[UNK]" , __lowercase : Tuple="[SEP]" , __lowercase : Any="[PAD]" , __lowercase : List[Any]="[CLS]" , __lowercase : Union[str, Any]="[MASK]" , __lowercase : Tuple=True , __lowercase : str=None , **__lowercase : Any , ) -> Optional[Any]:
super().__init__(
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , tokenize_chinese_chars=__lowercase , strip_accents=__lowercase , **__lowercase , )
SCREAMING_SNAKE_CASE__ : str =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , __lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , __lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , __lowercase ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE__ : int =getattr(__lowercase , normalizer_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE__ : Any =do_lower_case
SCREAMING_SNAKE_CASE__ : Any =strip_accents
SCREAMING_SNAKE_CASE__ : Dict =tokenize_chinese_chars
SCREAMING_SNAKE_CASE__ : Union[str, Any] =normalizer_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =do_lower_case
def __magic_name__ ( self : int , __lowercase : Optional[Any] , __lowercase : Union[str, Any]=None ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __magic_name__ ( self : Union[str, Any] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ : List[str] =[self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__ ( self : List[Any] , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self._tokenizer.model.save(__lowercase , name=__lowercase )
return tuple(__lowercase )
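# Hedged usage sketch of the two helpers above, with made-up ids purely for
# illustration (101/102 stand in for [CLS]/[SEP]; nothing is looked up here):
_cls, _sep = [101], [102]
_ids_a, _ids_b = [7, 8], [9]
assert _cls + _ids_a + _sep + _ids_b + _sep == [101, 7, 8, 102, 9, 102]
assert len(_cls + _ids_a + _sep) * [0] + len(_ids_b + _sep) * [1] == [0, 0, 0, 0, 1, 1]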
| 665 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json'
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class __SCREAMING_SNAKE_CASE ( __A ):
snake_case_ = """fnet"""
def __init__( self : Optional[Any] , __lowercase : Optional[int]=3_20_00 , __lowercase : Tuple=7_68 , __lowercase : Dict=12 , __lowercase : Any=30_72 , __lowercase : str="gelu_new" , __lowercase : Any=0.1 , __lowercase : int=5_12 , __lowercase : List[Any]=4 , __lowercase : str=0.02 , __lowercase : List[Any]=1e-12 , __lowercase : Optional[int]=False , __lowercase : Dict=5_12 , __lowercase : Dict=3 , __lowercase : List[Any]=1 , __lowercase : Tuple=2 , **__lowercase : Tuple , ) -> Any:
super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =vocab_size
SCREAMING_SNAKE_CASE__ : List[Any] =max_position_embeddings
SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size
SCREAMING_SNAKE_CASE__ : List[str] =num_hidden_layers
SCREAMING_SNAKE_CASE__ : int =intermediate_size
SCREAMING_SNAKE_CASE__ : Any =hidden_act
SCREAMING_SNAKE_CASE__ : Dict =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : str =initializer_range
SCREAMING_SNAKE_CASE__ : str =type_vocab_size
SCREAMING_SNAKE_CASE__ : List[Any] =layer_norm_eps
SCREAMING_SNAKE_CASE__ : Tuple =use_tpu_fourier_optimizations
SCREAMING_SNAKE_CASE__ : Optional[Any] =tpu_short_seq_length
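# Hedged usage sketch: the real class this file mirrors is
# `transformers.FNetConfig`; the import is guarded, and the assertion only
# restates the defaults spelled out in the signature above.
try:
    from transformers import FNetConfig

    _cfg = FNetConfig()
    assert _cfg.hidden_size == 768 and _cfg.num_hidden_layers == 12
except ImportError:
    pass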
| 704 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
a_ = False
a_ = False
def _a( UpperCamelCase__ : Namespace ):
'''simple docstring'''
return TrainCommand(UpperCamelCase__ )
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
@staticmethod
def __magic_name__ ( __lowercase : ArgumentParser ) -> Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' )
train_parser.add_argument(
'''--train_data''' , type=__lowercase , required=__lowercase , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=__lowercase , default=0 , help='''Column of the dataset csv file with example labels.''' )
train_parser.add_argument(
'''--column_text''' , type=__lowercase , default=1 , help='''Column of the dataset csv file with example texts.''' )
train_parser.add_argument(
'''--column_id''' , type=__lowercase , default=2 , help='''Column of the dataset csv file with example ids.''' )
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' )
train_parser.add_argument('''--validation_data''' , type=__lowercase , default='''''' , help='''path to validation dataset.''' )
train_parser.add_argument(
'''--validation_split''' , type=__lowercase , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
        train_parser.add_argument('''--output''' , type=__lowercase , default='''./''' , help='''path to save the trained model.''' )
train_parser.add_argument(
'''--task''' , type=__lowercase , default='''text_classification''' , help='''Task to train the model on.''' )
train_parser.add_argument(
'''--model''' , type=__lowercase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' )
train_parser.add_argument('''--train_batch_size''' , type=__lowercase , default=32 , help='''Batch size for training.''' )
train_parser.add_argument('''--valid_batch_size''' , type=__lowercase , default=64 , help='''Batch size for validation.''' )
train_parser.add_argument('''--learning_rate''' , type=__lowercase , default=3e-5 , help='''Learning rate.''' )
train_parser.add_argument('''--adam_epsilon''' , type=__lowercase , default=1e-08 , help='''Epsilon for Adam optimizer.''' )
train_parser.set_defaults(func=__lowercase )
def __init__( self : Tuple , __lowercase : Namespace ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Tuple =logging.get_logger('''transformers-cli/training''' )
SCREAMING_SNAKE_CASE__ : int ='''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output , exist_ok=__lowercase )
SCREAMING_SNAKE_CASE__ : Any =args.output
SCREAMING_SNAKE_CASE__ : str =args.column_label
SCREAMING_SNAKE_CASE__ : List[Any] =args.column_text
SCREAMING_SNAKE_CASE__ : Tuple =args.column_id
self.logger.info(F"Loading {args.task} pipeline for {args.model}" )
if args.task == "text_classification":
SCREAMING_SNAKE_CASE__ : List[str] =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"Loading dataset from {args.train_data}" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =None
if args.validation_data:
self.logger.info(F"Loading validation dataset from {args.validation_data}" )
SCREAMING_SNAKE_CASE__ : List[Any] =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =args.validation_split
SCREAMING_SNAKE_CASE__ : List[Any] =args.train_batch_size
SCREAMING_SNAKE_CASE__ : Any =args.valid_batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =args.learning_rate
SCREAMING_SNAKE_CASE__ : int =args.adam_epsilon
def __magic_name__ ( self : Any ) -> str:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __magic_name__ ( self : Optional[int] ) -> Tuple:
raise NotImplementedError
def __magic_name__ ( self : Dict ) -> List[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
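# Hedged usage sketch: a typical shell invocation of this command, assuming the
# `transformers-cli` entry point is installed. All paths below are made up.
#
#   transformers-cli train \
#       --train_data ./data/train.csv \
#       --column_label 0 --column_text 1 --column_id 2 \
#       --model bert-base-uncased --output ./trained_model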
| 665 | 0 |
'''simple docstring'''
from maths.prime_check import is_prime
def _a( UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
if not isinstance(__snake_case, __snake_case ):
SCREAMING_SNAKE_CASE__ : int =f"Input value of [number={number}] must be an integer"
raise TypeError(__snake_case )
if is_prime(__snake_case ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
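# Hedged, self-contained restatement of the intended behaviour (the body above
# keeps the dataset's obfuscated placeholders where `number`/`int` once stood):
def _twin_prime_sketch(number: int) -> int:
    def _is_prime(n: int) -> bool:
        return n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))

    return number + 2 if _is_prime(number) and _is_prime(number + 2) else -1

assert _twin_prime_sketch(3) == 5   # (3, 5) are twin primes
assert _twin_prime_sketch(4) == -1  # 4 is not prime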
| 705 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = KandinskyVaaImgaImgPipeline
snake_case_ = ["""image_embeds""", """negative_image_embeds""", """image"""]
snake_case_ = [
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
snake_case_ = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
snake_case_ = False
@property
def __magic_name__ ( self : List[str] ) -> Tuple:
return 32
@property
def __magic_name__ ( self : List[str] ) -> str:
return 32
@property
def __magic_name__ ( self : Any ) -> Optional[int]:
return self.time_input_dim
@property
def __magic_name__ ( self : List[Any] ) -> int:
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Tuple ) -> Optional[int]:
return 1_00
@property
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''in_channels''': 4,
            # Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE__ : Optional[int] =UNetaDConditionModel(**__lowercase )
return model
@property
def __magic_name__ ( self : Dict ) -> Any:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __magic_name__ ( self : Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] =VQModel(**self.dummy_movq_kwargs )
return model
def __magic_name__ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[str] =self.dummy_unet
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_movq
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''num_train_timesteps''': 10_00,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE__ : str =DDIMScheduler(**__lowercase )
SCREAMING_SNAKE_CASE__ : Any ={
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __magic_name__ ( self : str , __lowercase : Optional[Any] , __lowercase : Any=0 ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowercase )
# create init_image
SCREAMING_SNAKE_CASE__ : Optional[Any] =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ : Any =Image.fromarray(np.uinta(__lowercase ) ).convert('''RGB''' ).resize((2_56, 2_56) )
if str(__lowercase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ : Dict =torch.manual_seed(__lowercase )
else:
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
SCREAMING_SNAKE_CASE__ : str ={
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __magic_name__ ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] ='''cpu'''
SCREAMING_SNAKE_CASE__ : Tuple =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =output.images
SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipe(
**self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : List[str] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : Tuple =np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : int ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : str =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE__ : List[Any] ='''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE__ : Optional[int] =KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =KandinskyVaaImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Dict =pipeline.to(__lowercase )
pipeline.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =pipe_prior(
__lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ : int =output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__lowercase , __lowercase )
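# Hedged note on the `strength` argument exercised above: in the diffusers
# img2img convention (assumed here), strength=0.2 noises the input image and
# runs only the last ~20% of the schedule, so the output stays close to the
# original; strength=1.0 would ignore the input almost entirely.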
| 665 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
a_ = '''0.12''' # assumed parallelism: 8
@require_flax
@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@classmethod
def __magic_name__ ( cls : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =TOKEN
HfFolder.save_token(snake_case__ )
@classmethod
def __magic_name__ ( cls : List[Any] ) -> Optional[Any]:
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def __magic_name__ ( self : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Dict =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
SCREAMING_SNAKE_CASE__ : Any =FlaxBertModel(snake_case__ )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : str =FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
SCREAMING_SNAKE_CASE__ : Dict =flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE__ : List[str] =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE__ : Optional[Any] =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(snake_case__ , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(snake_case__ , repo_id='''test-model-flax''' , push_to_hub=snake_case__ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Optional[int] =FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
SCREAMING_SNAKE_CASE__ : str =flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE__ : str =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(snake_case__ , 1e-3 , msg=F"{key} not identical" )
def __magic_name__ ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE__ : str =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
SCREAMING_SNAKE_CASE__ : str =FlaxBertModel(snake_case__ )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Tuple =FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
SCREAMING_SNAKE_CASE__ : Tuple =flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE__ : Any =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE__ : Any =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(snake_case__ , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
snake_case__ , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=snake_case__ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : str =FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE__ : Tuple =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE__ : Any =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(snake_case__ , 1e-3 , msg=F"{key} not identical" )
def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict =True
SCREAMING_SNAKE_CASE__ : Optional[Any] =flatten_dict(modela.params )
SCREAMING_SNAKE_CASE__ : str =flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
SCREAMING_SNAKE_CASE__ : int =False
return models_are_equal
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : Any ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] =BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
SCREAMING_SNAKE_CASE__ : List[str] =FlaxBertModel(snake_case__ )
SCREAMING_SNAKE_CASE__ : Any ='''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(snake_case__ , snake_case__ ) )
with self.assertRaises(snake_case__ ):
SCREAMING_SNAKE_CASE__ : int =FlaxBertModel.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =FlaxBertModel.from_pretrained(snake_case__ , subfolder=snake_case__ )
self.assertTrue(check_models_equal(snake_case__ , snake_case__ ) )
def __magic_name__ ( self : Optional[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] =FlaxBertModel(snake_case__ )
SCREAMING_SNAKE_CASE__ : Any ='''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(snake_case__ , snake_case__ ) , max_shard_size='''10KB''' )
with self.assertRaises(snake_case__ ):
SCREAMING_SNAKE_CASE__ : Any =FlaxBertModel.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =FlaxBertModel.from_pretrained(snake_case__ , subfolder=snake_case__ )
self.assertTrue(check_models_equal(snake_case__ , snake_case__ ) )
def __magic_name__ ( self : Tuple ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[int] ='''bert'''
SCREAMING_SNAKE_CASE__ : str ='''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(snake_case__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] =FlaxBertModel.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =FlaxBertModel.from_pretrained(snake_case__ , subfolder=snake_case__ )
self.assertIsNotNone(snake_case__ )
def __magic_name__ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Tuple ='''bert'''
SCREAMING_SNAKE_CASE__ : Optional[int] ='''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(snake_case__ ):
SCREAMING_SNAKE_CASE__ : List[Any] =FlaxBertModel.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =FlaxBertModel.from_pretrained(snake_case__ , subfolder=snake_case__ )
self.assertIsNotNone(snake_case__ )
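# Hedged illustration of the flatten_dict pattern used throughout the tests
# above (assumes `flax` is installed; the import is guarded):
try:
    from flax.traverse_util import flatten_dict

    assert flatten_dict({'a': {'b': 1}}) == {('a', 'b'): 1}  # nested keys become tuples
except ImportError:
    pass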
| 706 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
a_ = TypeVar('T')
class __SCREAMING_SNAKE_CASE ( Generic[T] ):
snake_case_ = 42 # Cache store of keys
snake_case_ = 42 # References of the keys in cache
snake_case_ = 10 # Maximum capacity of cache
def __init__( self : Dict , __lowercase : int ) -> None:
SCREAMING_SNAKE_CASE__ : Any =deque()
SCREAMING_SNAKE_CASE__ : str =set()
if not n:
SCREAMING_SNAKE_CASE__ : Optional[Any] =sys.maxsize
elif n < 0:
raise ValueError('''n should be an integer greater than 0.''' )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =n
def __magic_name__ ( self : List[str] , __lowercase : T ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
SCREAMING_SNAKE_CASE__ : int =self.dq_store.pop()
self.key_reference.remove(__lowercase )
else:
self.dq_store.remove(__lowercase )
self.dq_store.appendleft(__lowercase )
self.key_reference.add(__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> None:
for k in self.dq_store:
print(__lowercase )
def __repr__( self : List[Any] ) -> str:
return F"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
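# Hedged walk-through of the demo above: `appendleft` puts the most recently
# referred key at the front, and `pop()` evicts from the right, i.e. the least
# recently referred key once capacity (4) is reached.
#
#   refer 'A' -> ['A']
#   refer 2   -> [2, 'A']
#   refer 3   -> [3, 2, 'A']
#   refer 'A' -> ['A', 3, 2]      ('A' moved back to the front)
#   refer 4   -> [4, 'A', 3, 2]
#   refer 5   -> [5, 4, 'A', 3]   (cache full, 2 evicted)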
| 665 | 0 |
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ = 1_6
a_ = 3_2
def _a( UpperCamelCase__ : Tuple, UpperCamelCase__ : Union[str, Any] = 1_6 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple =AutoTokenizer.from_pretrained('''bert-base-cased''' )
SCREAMING_SNAKE_CASE__ : Tuple =load_dataset('''glue''', '''mrpc''' )
def tokenize_function(UpperCamelCase__ : Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=lowerCAmelCase_, max_length=lowerCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE__ : Tuple =datasets.map(
lowerCAmelCase_, batched=lowerCAmelCase_, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE__ : List[Any] =tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(UpperCamelCase__ : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE__ : Tuple =1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE__ : Optional[int] =1_6
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE__ : Optional[int] =8
else:
SCREAMING_SNAKE_CASE__ : Optional[int] =None
return tokenizer.pad(
lowerCAmelCase_, padding='''longest''', max_length=lowerCAmelCase_, pad_to_multiple_of=lowerCAmelCase_, return_tensors='''pt''', )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ : str =DataLoader(
tokenized_datasets['''train'''], shuffle=lowerCAmelCase_, collate_fn=lowerCAmelCase_, batch_size=lowerCAmelCase_, drop_last=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : int =DataLoader(
tokenized_datasets['''validation'''], shuffle=lowerCAmelCase_, collate_fn=lowerCAmelCase_, batch_size=lowerCAmelCase_, drop_last=(accelerator.mixed_precision == '''fp8'''), )
return train_dataloader, eval_dataloader
def _a( UpperCamelCase__ : Dict, UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    SCREAMING_SNAKE_CASE__ : Union[str, Any] =config['''lr''']
SCREAMING_SNAKE_CASE__ : Any =int(config['''num_epochs'''] )
SCREAMING_SNAKE_CASE__ : int =int(config['''seed'''] )
SCREAMING_SNAKE_CASE__ : List[Any] =int(config['''batch_size'''] )
SCREAMING_SNAKE_CASE__ : str =evaluate.load('''glue''', '''mrpc''' )
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE__ : str =1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE__ : str =batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE__ : Any =MAX_GPU_BATCH_SIZE
set_seed(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : List[Any] =get_dataloaders(lowerCAmelCase_, lowerCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE__ : Union[str, Any] =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=lowerCAmelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE__ : str =AdamW(params=model.parameters(), lr=lowerCAmelCase_ )
# Instantiate scheduler
SCREAMING_SNAKE_CASE__ : Tuple =get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_, num_warmup_steps=1_0_0, num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps, )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE__ : int =accelerator.prepare(
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
# Now we train the model
for epoch in range(lowerCAmelCase_ ):
model.train()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE__ : Any =model(**lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Tuple =outputs.loss
SCREAMING_SNAKE_CASE__ : int =loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str =model(**lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : str =outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE__ : Any =accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=lowerCAmelCase_, references=lowerCAmelCase_, )
SCREAMING_SNAKE_CASE__ : List[str] =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", lowerCAmelCase_ )
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''', type=lowerCAmelCase_, default=lowerCAmelCase_, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''', )
parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''' )
SCREAMING_SNAKE_CASE__ : str =parser.parse_args()
    SCREAMING_SNAKE_CASE__ : Tuple ={'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6}
training_function(lowerCAmelCase_, lowerCAmelCase_ )
if __name__ == "__main__":
main()
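# Hedged usage sketch: typical ways to run this script. The `accelerate launch`
# CLI ships with the `accelerate` package; the script filename is illustrative.
#
#   python nlp_example.py --mixed_precision fp16
#   accelerate launch nlp_example.py --mixed_precision bf16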
| 707 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
a_ = list[list[float | int]]
def _a( UpperCamelCase__ : Matrix, UpperCamelCase__ : Matrix ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(size + 1 )] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : float
for row in range(UpperCamelCase__ ):
for col in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Tuple =matrix[row][col]
SCREAMING_SNAKE_CASE__ : Optional[int] =vector[row][0]
SCREAMING_SNAKE_CASE__ : Any =0
SCREAMING_SNAKE_CASE__ : Union[str, Any] =0
while row < size and col < size:
# pivoting
SCREAMING_SNAKE_CASE__ : Any =max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCamelCase__, UpperCamelCase__ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[pivot_row], augmented[row]
for rowa in range(row + 1, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] =augmented[rowa][col] / augmented[row][col]
SCREAMING_SNAKE_CASE__ : Tuple =0
for cola in range(col + 1, size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1, UpperCamelCase__ ):
for row in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[row][col] / augmented[col][col]
for cola in range(UpperCamelCase__, size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row], 1_0 )] for row in range(UpperCamelCase__ )
]
def _a( UpperCamelCase__ : list[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(UpperCamelCase__ )] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Matrix =[[0] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Matrix
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
for x_val, y_val in enumerate(UpperCamelCase__ ):
for col in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] =(x_val + 1) ** (size - col - 1)
SCREAMING_SNAKE_CASE__ : Dict =y_val
SCREAMING_SNAKE_CASE__ : Optional[int] =solve(UpperCamelCase__, UpperCamelCase__ )
def interpolated_func(UpperCamelCase__ : int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(UpperCamelCase__ ) )
return interpolated_func
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**1_0
)
def _a( UpperCamelCase__ : Callable[[int], int] = question_function, UpperCamelCase__ : int = 1_0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : list[int] =[func(UpperCamelCase__ ) for x_val in range(1, order + 1 )]
SCREAMING_SNAKE_CASE__ : list[Callable[[int], int]] =[
interpolate(data_points[:max_coeff] ) for max_coeff in range(1, order + 1 )
]
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : Callable[[int], int]
SCREAMING_SNAKE_CASE__ : int
for poly in polynomials:
SCREAMING_SNAKE_CASE__ : Any =1
while func(UpperCamelCase__ ) == poly(UpperCamelCase__ ):
x_val += 1
ret += poly(UpperCamelCase__ )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
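# Hedged worked example of the FIT idea above, using the cubic sequence
# u(n) = n^3 from the Project Euler 101 statement (order 3):
#   OP(1) fits [1]         and first fails at n=2 with value 1
#   OP(2) fits [1, 8]      (7n - 6)          and first fails at n=3 with value 15
#   OP(3) fits [1, 8, 27]  (6n^2 - 11n + 6)  and first fails at n=4 with value 58
assert 1 + 15 + 58 == 74  # the sum of FITs quoted in the problem statement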
| 665 | 0 |
'''simple docstring'''
def _a( UpperCamelCase__ : Dict ):
'''simple docstring'''
if not isinstance(_snake_case, _snake_case ):
raise ValueError('''Input must be an integer''' )
if input_num <= 0:
raise ValueError('''Input must be positive''' )
return sum(
divisor for divisor in range(1, input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708 |
'''simple docstring'''
def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =0
SCREAMING_SNAKE_CASE__ : Union[str, Any] =len(UpperCamelCase__ ) - 1
while left <= right:
        # avoid division by zero during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
SCREAMING_SNAKE_CASE__ : Union[str, Any] =left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase__ ):
return None
SCREAMING_SNAKE_CASE__ : Optional[int] =sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =left
SCREAMING_SNAKE_CASE__ : Optional[Any] =point
elif point > right:
SCREAMING_SNAKE_CASE__ : Optional[int] =right
SCREAMING_SNAKE_CASE__ : Tuple =point
else:
if item < current_item:
SCREAMING_SNAKE_CASE__ : str =point - 1
else:
SCREAMING_SNAKE_CASE__ : Tuple =point + 1
return None
def _a( UpperCamelCase__ : List[str], UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
SCREAMING_SNAKE_CASE__ : Dict =left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase__ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
elif point > right:
return interpolation_search_by_recursion(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, point - 1 )
else:
return interpolation_search_by_recursion(
UpperCamelCase__, UpperCamelCase__, point + 1, UpperCamelCase__ )
def _a( UpperCamelCase__ : Dict ):
'''simple docstring'''
if collection != sorted(UpperCamelCase__ ):
raise ValueError('''Collection must be ascending sorted''' )
return True
if __name__ == "__main__":
import sys
a_ = 0
if debug == 1:
a_ = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('Sequence must be ascending sorted to apply interpolation search')
a_ = 6_7
a_ = interpolation_search(collection, target)
if result is not None:
print(F'''{target} found at positions: {result}''')
else:
print('Not found')
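# Hedged illustration of the probe formula used above: the next index is a
# linear interpolation between the bounds, driven by where the target value
# sits relative to the endpoint values.
_coll, _item, _left, _right = [10, 30, 40, 45, 50, 66, 77, 93], 67, 0, 7
_point = _left + ((_item - _coll[_left]) * (_right - _left)) // (_coll[_right] - _coll[_left])
assert _point == 4  # probes index 4 (value 50) first, then narrows to the right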
| 665 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'nielsr/canine-s': 2_0_4_8,
}
# Unicode defines 1,114,112 total “codepoints”
a_ = 1_1_1_4_1_1_2
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
a_ = 0
a_ = 0xE000
a_ = 0xE001
a_ = 0xE002
a_ = 0xE003
a_ = 0xE004
# Maps special codepoints to human-readable names.
a_ = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
a_ = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # NOTE: the chr(...) defaults below assume the CLS/SEP/PAD/MASK codepoint
    # constants defined alongside SPECIAL_CODEPOINTS above, as in the upstream
    # CANINE tokenizer source.
    def __init__( self , bos_token=chr(CLS ) , eos_token=chr(SEP ) , sep_token=chr(SEP ) , cls_token=chr(CLS ) , pad_token=chr(PAD ) , mask_token=chr(MASK ) , add_prefix_space=False , model_max_length=20_48 , **kwargs , ) -> None:
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
    @property
    def vocab_size( self ) -> int:
        return self._unicode_vocab_size
    def _tokenize( self , text : str ) -> List[str]:
        # CANINE is character-level: every Unicode character is its own token.
        return list(text )
    def _convert_token_to_id( self , token : str ) -> int:
        try:
            return ord(token )
        except TypeError:
            raise ValueError(F"invalid token: '{token}'" )
    def _convert_id_to_token( self , index : int ) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(F"invalid id: {index}" )
    def convert_tokens_to_string( self , tokens ) -> str:
        return "".join(tokens )
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : List[int] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : List[int] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        result = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1 )) + [1]
        return result
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : List[int] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep ) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep ) * [1]
        return result
    def save_vocabulary( self , save_directory : str , filename_prefix : str = None ):
        # CANINE has no vocabulary file to write.
        return ()
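# A minimal usage sketch (hedged): the class above is CanineTokenizer upstream, and the
# checkpoint-free construction works because the tokenizer is purely character-based:
#
#   tokenizer = CanineTokenizer()
#   ids = tokenizer("hi")["input_ids"]   # -> [0xE000, ord("h"), ord("i"), 0xE001]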
| 709 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    # Fallback stub so the module imports even without PIL installed.
    class Image:
        @staticmethod
        def open( *args , **kwargs ):
            pass
@is_pipeline_test
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@require_torch
    def test_small_model_pt( self ) -> None:
        image_classifier = pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        output = image_classifier(image , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
        self.assertIn(
            nested_simplify(output ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
        output = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'''score''': 0.333, '''label''': ANY(str )},
                    {'''score''': 0.333, '''label''': ANY(str )},
                    {'''score''': 0.333, '''label''': ANY(str )},
                ],
            ]
            * 5 , )
@require_tf
    def test_small_model_tf( self ) -> None:
        image_classifier = pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        output = image_classifier(image , candidate_labels=['''a''', '''b''', '''c'''] )
        self.assertEqual(
            nested_simplify(output ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
        output = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'''score''': 0.333, '''label''': ANY(str )},
                    {'''score''': 0.333, '''label''': ANY(str )},
                    {'''score''': 0.333, '''label''': ANY(str )},
                ],
            ]
            * 5 , )
@slow
@require_torch
    def test_large_model_pt( self ) -> None:
        image_classifier = pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        output = image_classifier(image , candidate_labels=['''cat''', '''plane''', '''remote'''] )
        self.assertEqual(
            nested_simplify(output ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
        output = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
    def test_large_model_tf( self ) -> None:
        image_classifier = pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        output = image_classifier(image , candidate_labels=['''cat''', '''plane''', '''remote'''] )
        self.assertEqual(
            nested_simplify(output ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
        output = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
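# Hedged usage note: with the test_* method names above, individual tests can be
# selected by name, e.g.
#   python -m pytest -k "test_small_model_pt" tests/pipelines/test_pipelines_zero_shot_image_classification.py
# (the test-file path is an assumption based on the imports in this module).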
| 665 | 0 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object
    def Body( *x , **y ):
        '''simple docstring'''
        pass
    _serve_dependencies_installed = False
logger = logging.get_logger('transformers-cli/serving')
def serve_command_factory( args : Namespace ):
    '''simple docstring'''
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
    return ServeCommand(nlp, args.host, args.port, args.workers )
class ServeModelInfoResult( BaseModel ):
    infos: dict
class ServeTokenizeResult( BaseModel ):
    tokens: List[str]
    tokens_ids: Optional[List[int]]
class ServeDeTokenizeResult( BaseModel ):
    text: str
class ServeForwardResult( BaseModel ):
    output: Any
class ServeCommand( BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand( parser : ArgumentParser ):
        serve_parser = parser.add_parser(
            '''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
        serve_parser.add_argument(
            '''--task''' , type=str , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , )
        serve_parser.add_argument('''--host''' , type=str , default='''localhost''' , help='''Interface the server will listen on.''' )
        serve_parser.add_argument('''--port''' , type=int , default=88_88 , help='''Port the serving will listen to.''' )
        serve_parser.add_argument('''--workers''' , type=int , default=1 , help='''Number of http workers''' )
        serve_parser.add_argument('''--model''' , type=str , help='''Model\'s name or path to stored model.''' )
        serve_parser.add_argument('''--config''' , type=str , help='''Model\'s config name or path to stored model.''' )
        serve_parser.add_argument('''--tokenizer''' , type=str , help='''Tokenizer name to use.''' )
        serve_parser.add_argument(
            '''--device''' , type=int , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
        serve_parser.set_defaults(func=serve_command_factory )
    def __init__( self , pipeline : Pipeline , host : str , port : int , workers : int ) -> None:
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
        if not _serve_dependencies_installed:
            raise RuntimeError(
                '''Using serve command requires FastAPI and uvicorn. '''
                '''Please install transformers with [serving]: pip install \"transformers[serving]\".'''
                '''Or install FastAPI and uvicorn separately.''' )
        else:
            logger.info(F"Serving model over {host}:{port}" )
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        '''/''' , self.model_info , response_model=ServeModelInfoResult , response_class=JSONResponse , methods=['''GET'''] , ),
                    APIRoute(
                        '''/tokenize''' , self.tokenize , response_model=ServeTokenizeResult , response_class=JSONResponse , methods=['''POST'''] , ),
                    APIRoute(
                        '''/detokenize''' , self.detokenize , response_model=ServeDeTokenizeResult , response_class=JSONResponse , methods=['''POST'''] , ),
                    APIRoute(
                        '''/forward''' , self.forward , response_model=ServeForwardResult , response_class=JSONResponse , methods=['''POST'''] , ),
                ] , timeout=6_00 , )
    def run( self ) -> None:
        run(self._app , host=self.host , port=self.port , workers=self.workers )
    def model_info( self ):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
    def tokenize( self , text_input : str = Body(None , embed=True ) , return_ids : bool = Body(False , embed=True ) ):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input )
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt )
                return ServeTokenizeResult(tokens=tokens_txt , tokens_ids=tokens_ids )
            else:
                return ServeTokenizeResult(tokens=tokens_txt )
        except Exception as e:
            raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(e )} )
    def detokenize( self , tokens_ids : List[int] = Body(None , embed=True ) , skip_special_tokens : bool = Body(False , embed=True ) , cleanup_tokenization_spaces : bool = Body(True , embed=True ) , ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids , skip_special_tokens , cleanup_tokenization_spaces )
            return ServeDeTokenizeResult(model='''''' , text=decoded_str )
        except Exception as e:
            raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(e )} )
    async def forward( self , inputs=Body(None , embed=True ) ):
        if len(inputs ) == 0:
            return ServeForwardResult(output=[] , attention=[] )
        try:
            # Forward through the model
            output = self._pipeline(inputs )
            return ServeForwardResult(output=output )
        except Exception as e:
            raise HTTPException(5_00 , {'''error''': str(e )} )
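# A hedged usage sketch (payload field names follow the Body(...) parameters above):
#
#   transformers-cli serve --task text-classification --host localhost --port 8888
#   curl -X POST http://localhost:8888/tokenize \
#        -H "Content-Type: application/json" \
#        -d '{"text_input": "hello world", "return_ids": true}'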
| 710 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    tokenizer_class = JukeboxTokenizer
    metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
    def test_1b_lyrics_tokenizer( self ) -> None:
        import torch
        tokenizer = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
        tokens = tokenizer(**self.metas )['''input_ids''']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer( self ) -> None:
        import torch
        tokenizer = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
        tokens = tokenizer(**self.metas )['''input_ids''']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
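        # Hedged note: the three tensors per example are the (artist, genre, lyrics)
        # conditioning streams; the 5b checkpoint pads its shorter metadata ids with -1,
        # as visible in the expected outputs above.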
| 665 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}
class __SCREAMING_SNAKE_CASE ( BackboneConfigMixin , PretrainedConfig ):
    model_type = '''dinat'''
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['''stem'''] + [F"stage{idx}" for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
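# A minimal usage sketch (hedged; the config class above is DinatConfig upstream):
#
#   config = DinatConfig(embed_dim=64, depths=[3, 4, 6, 5])
#   config.hidden_size   # 512 == 64 * 2 ** (4 - 1), the channel dim after the last stage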
| 711 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    model_type = """gpt_neox"""
    def __init__( self , vocab_size=5_04_32 , hidden_size=61_44 , num_hidden_layers=44 , num_attention_heads=64 , intermediate_size=2_45_76 , hidden_act="gelu" , rotary_pct=0.25 , rotary_emb_base=1_00_00 , attention_dropout=0.0 , hidden_dropout=0.0 , classifier_dropout=0.1 , max_position_embeddings=20_48 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=0 , eos_token_id=2 , tie_word_embeddings=False , use_parallel_residual=True , rope_scaling=None , **kwargs , ) -> None:
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
    def _rope_scaling_validation( self ) -> None:
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                F"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get('''type''' , None )
        rope_scaling_factor = self.rope_scaling.get('''factor''' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
| 665 | 0 |
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator( iterations : int ):
    '''simple docstring'''
    def is_in_circle(x : float , y : float ) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2) )
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0 ), uniform(-1.0, 1.0 ) ) )
        for _ in range(iterations ) )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}" )
    print(f"The numpy value of pi is {pi}" )
    print(f"The total error is {abs(pi - pi_estimate )}" )
def area_under_curve_estimator( iterations : int , function_to_integrate : Callable[[float], float] , min_value : float = 0.0 , max_value : float = 1.0 , ):
    '''simple docstring'''
    return mean(
        function_to_integrate(uniform(min_value, max_value ) ) for _ in range(iterations ) ) * (max_value - min_value)
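# The estimator above is plain Monte Carlo integration: for U ~ Uniform(a, b),
# E[f(U)] = (1 / (b - a)) * integral_a^b f(x) dx, so the integral is approximated
# by (b - a) * mean(f(U_i)) over the sampled points.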
def area_under_line_estimator_check( iterations : int , min_value : float = 0.0 , max_value : float = 1.0 ):
    '''simple docstring'''
    def identity_function(x : float ) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('''******************''' )
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}" )
    print(f"Estimated value is {estimated_value}" )
    print(f"Expected value is {expected_value}" )
    print(f"Total error is {abs(estimated_value - expected_value )}" )
    print('''******************''' )
def pi_estimator_using_area_under_curve( iterations : int ):
    '''simple docstring'''
    def function_to_integrate(x : float ) -> float:
        return sqrt(4.0 - x * x )
    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0 )
    print('''******************''' )
    print('''Estimating pi using area_under_curve_estimator''' )
    print(f"Estimated value is {estimated_value}" )
    print(f"Expected value is {pi}" )
    print(f"Total error is {abs(estimated_value - pi )}" )
    print('''******************''' )
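# Why this estimates pi: sqrt(4 - x^2) on [0, 2] traces a quarter circle of radius 2,
# whose area is (1/4) * pi * 2**2 = pi, so the integral itself converges to pi.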
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
    'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_deberta_fast'] = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deberta'] = [
        'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'DebertaForMaskedLM',
        'DebertaForQuestionAnswering',
        'DebertaForSequenceClassification',
        'DebertaForTokenClassification',
        'DebertaModel',
        'DebertaPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deberta'] = [
        'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFDebertaForMaskedLM',
        'TFDebertaForQuestionAnswering',
        'TFDebertaForSequenceClassification',
        'TFDebertaForTokenClassification',
        'TFDebertaModel',
        'TFDebertaPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
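    # Hedged note: _LazyModule defers the heavy torch/TF imports declared in
    # _import_structure until an attribute (e.g. DebertaModel) is first accessed,
    # which keeps `import transformers` cheap.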
| 665 | 0 |
'''simple docstring'''
import functools
from typing import Any
def word_break( string : str , words : list[str] ) -> bool:
    '''simple docstring'''
    if not isinstance(string , str ) or len(string ) == 0:
        raise ValueError('''the string should be not empty string''' )
    if not isinstance(words , list ) or not all(
        isinstance(item , str ) and len(item ) > 0 for item in words ):
        raise ValueError('''the words should be a list of non-empty strings''' )
    # Build trie
    trie : dict[str, Any] = {}
    word_keeper_key = """WORD_KEEPER"""
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string )
    # Dynamic programming method
    @functools.cache
    def is_breakable(index : int ) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index , len_string ):
            trie_node = trie_node.get(string[i] , None )
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key , False ) and is_breakable(i + 1 ):
                return True
        return False
    return is_breakable(0 )
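# Examples (hedged):
#   word_break("applepenapple", ["apple", "pen"])                   # True
#   word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])  # False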
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method( coefficient_matrix : NDArray[float64], constant_matrix : NDArray[float64], init_val : list[int], iterations : int, ) -> list[float]:
    '''simple docstring'''
    rows1 , cols1 = coefficient_matrix.shape
    rows2 , cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg )
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg )
    if rows1 != rows2:
        msg = (
            '''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg )
    if len(init_val ) != rows1:
        msg = (
            '''Number of initial values must be equal to number of rows in coefficient '''
            f"matrix but received {len(init_val )} and {rows1}"
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError('''Iterations must be at least 1''' )
    table : NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1 )
    rows , cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant( table : NDArray[float64] ) -> bool:
    '''simple docstring'''
    rows , cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
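    # A worked example (hedged): 4x + y = 1 and x + 3y = 2 is strictly diagonally
    # dominant, so Jacobi iteration converges to x = 1/11, y = 7/11.
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[1.0], [2.0]])
    print(jacobi_iteration_method(coefficient, constant, [0, 0], 25))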
| 665 | 0 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available('google.colab')
except ModuleNotFoundError:
    pass
@input.register
class __SCREAMING_SNAKE_CASE :
    def __init__( self , prompt : str = None , choices : list = [] ) -> None:
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = '''*'''
        else:
            self.arrow_char = '''➔ '''
    def write_choice( self , index : int , end : str = "" ) -> None:
        if sys.platform != "win32":
            writeColor(self.choices[index] , 32 , end )
        else:
            forceWrite(self.choices[index] , end )
    def print_choice( self , index : int ) -> None:
        if index == self.position:
            forceWrite(F" {self.arrow_char} " )
            self.write_choice(index )
        else:
            forceWrite(F"    {self.choices[index]}" )
        reset_cursor()
    def move_direction( self , direction : Direction , num_spaces : int = 1 ) -> None:
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position )
        move_cursor(num_spaces , direction.name )
        self.print_choice(self.position )
    @input.mark(KEYMAP['''up'''] )
    def move_up( self ) -> None:
        self.move_direction(Direction.UP )
    @input.mark(KEYMAP['''down'''] )
    def move_down( self ) -> None:
        self.move_direction(Direction.DOWN )
    @input.mark(KEYMAP['''newline'''] )
    def select( self ) -> int:
        move_cursor(len(self.choices ) - self.position , '''DOWN''' )
        return self.position
    @input.mark(KEYMAP['''interrupt'''] )
    def interrupt( self ) -> None:
        move_cursor(len(self.choices ) - self.position , '''DOWN''' )
        raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
    def select_row( self ) -> None:
        index = int(chr(self.current_selection ) )
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , movement )
            else:
                return
        else:
            return
    def run( self , default_choice : int = 0 ) -> int:
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , '''\n''' )
            if in_colab:
                forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' )
            else:
                forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' , '''\n''' )
        self.position = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(i )
            forceWrite('''\n''' )
        move_cursor(len(self.choices ) - self.position , '''UP''' )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input() )
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , '''UP''' )
                        clear_line()
                    self.write_choice(choice , '''\n''' )
                    return choice
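# A hedged usage sketch (interactive terminals only; the class above is BulletMenu in
# the upstream accelerate source):
#
#   menu = BulletMenu("Which device?", ["cpu", "cuda", "mps"])
#   selected_index = menu.run(default_choice=0)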
| 714 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size ):
    '''simple docstring'''
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True, **metadata['''model_config'''] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location='''cpu''' )['''module''']
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path )
    # add an entry for [MASK2]
    entity_vocab['''[MASK2]'''] = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken('''<ent>''', lstrip=False, rstrip=False )
    entity_token_2 = AddedToken('''<ent2>''', lstrip=False, rstrip=False )
    tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_1, entity_token_2]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''r''' ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] =json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : int ='''MLukeTokenizer'''
with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''w''' ) as f:
json.dump(UpperCamelCase__, UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__, MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ), '''w''' ) as f:
json.dump(UpperCamelCase__, UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =MLukeTokenizer.from_pretrained(UpperCamelCase__ )
# Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
    enta_init_index = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
    word_emb = state_dict['''embeddings.word_embeddings.weight''']
    ent_emb = word_emb[ent_init_index].unsqueeze(0 )
    enta_emb = word_emb[enta_init_index].unsqueeze(0 )
    state_dict['''embeddings.word_embeddings.weight'''] = torch.cat([word_emb, ent_emb, enta_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0 )
        enta_decoder_bias = decoder_bias[enta_init_index].unsqueeze(0 )
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
    # The w2e_/e2w_/e2e_ prefixes below follow the upstream conversion script for the
    # entity-aware self-attention query matrices.
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = F"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + '''w2e_''' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + '''e2w_''' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + '''e2e_''' + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['''entity_embeddings.entity_embeddings.weight''']
    entity_mask_emb = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
    state_dict['''entity_embeddings.entity_embeddings.weight'''] = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict['''entity_predictions.bias''']
    entity_mask_bias = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
    state_dict['''entity_predictions.bias'''] = torch.cat([entity_prediction_bias, entity_mask_bias] )
    model = LukeForMaskedLM(config=config ).eval()
    state_dict.pop('''entity_predictions.decoder.weight''' )
    state_dict.pop('''lm_head.decoder.weight''' )
    state_dict.pop('''lm_head.decoder.bias''' )
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
            new_state_dict['''luke.''' + key] = state_dict[key]
        else:
            new_state_dict[key] = state_dict[key]
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict, strict=False )
    if set(unexpected_keys ) != {"luke.embeddings.position_ids"}:
        raise ValueError(F"Unexpected unexpected_keys: {unexpected_keys}" )
    if set(missing_keys ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(F"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task='''entity_classification''' )
    text = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors='''pt''' )
    outputs = model(**encoding )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
        expected_shape = torch.Size((1, 3_3, 7_6_8) )
        expected_slice = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
        expected_shape = torch.Size((1, 1, 7_6_8) )
        expected_slice = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    text = '''Tokyo is the capital of <mask>.'''
    span = (2_4, 3_0)
    encoding = tokenizer(text, entity_spans=[span], return_tensors='''pt''' )
    outputs = model(**encoding )
    input_ids = encoding['''input_ids'''][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(predicted_id )
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
    print('''Saving PyTorch model to {}'''.format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def load_original_entity_vocab( entity_vocab_path ):
    '''simple docstring'''
    SPECIAL_TOKENS = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
    data = [json.loads(line ) for line in open(entity_vocab_path )]
    new_mapping = {}
    for entry in data:
        entity_id = entry['''id''']
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 665 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , hidden_sizes=[32, 64, 1_28] , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2"] , out_indices=[1, 2] , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels ) -> None:
        model = FocalNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone( self , config , pixel_values , labels ) -> None:
        model = FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ) -> None:
        model = FocalNetForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ) -> None:
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self : Optional[int] ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
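# Note on the shape checks in `create_and_check_model` above (editor's sketch
# with hypothetical sizes): patch embedding yields (image_size // patch_size) ** 2
# tokens, and each of the len(depths) - 1 downsampling stages halves both spatial
# dimensions, dividing the token count by 4 while doubling the channel width:
#     image_size, patch_size, depths, embed_dim = 32, 2, [1, 2, 1], 24
#     ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))  # -> 16 tokens
#     int(embed_dim * 2 ** (len(depths) - 1))                        # -> 96 channels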
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
snake_case_ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
snake_case_ = (
{"""feature-extraction""": FocalNetModel, """image-classification""": FocalNetForImageClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def __magic_name__ ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =FocalNetModelTester(self )
SCREAMING_SNAKE_CASE__ : Any =ConfigTester(self , config_class=__lowercase , embed_dim=37 , has_text_modality=__lowercase )
def __magic_name__ ( self : int ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __magic_name__ ( self : Union[str, Any] ) -> Any:
return
def __magic_name__ ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def __magic_name__ ( self : List[Any] ) -> int:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowercase )
def __magic_name__ ( self : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowercase )
def __magic_name__ ( self : Dict ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
@unittest.skip(reason='''FocalNet does not use inputs_embeds''' )
def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]:
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''' )
def __magic_name__ ( self : Optional[int] ) -> Dict:
pass
def __magic_name__ ( self : Any ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
SCREAMING_SNAKE_CASE__ : str =model_class(__lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Any =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) )
def __magic_name__ ( self : Any ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model_class(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : int =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Any =["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowercase )
def __magic_name__ ( self : Any , __lowercase : Any , __lowercase : int , __lowercase : int , __lowercase : int ) -> Any:
SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : int =outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Optional[Any] =getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__lowercase ) , __lowercase )
# FocalNet has a different seq_length
SCREAMING_SNAKE_CASE__ : Dict =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE__ : Dict =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
SCREAMING_SNAKE_CASE__ : int =outputs.reshaped_hidden_states
self.assertEqual(len(__lowercase ) , __lowercase )
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size , num_channels , height * width ).permute(0 , 2 , 1 )
        )
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __magic_name__ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Tuple =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
SCREAMING_SNAKE_CASE__ : Optional[int] =True
self.check_hidden_states_output(__lowercase , __lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : Any =True
self.check_hidden_states_output(__lowercase , __lowercase , __lowercase , __lowercase )
def __magic_name__ ( self : Any ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Any =3
SCREAMING_SNAKE_CASE__ : Any =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE__ : Any =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE__ : List[Any] =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE__ : Dict =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
SCREAMING_SNAKE_CASE__ : Any =True
self.check_hidden_states_output(__lowercase , __lowercase , __lowercase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : List[str] =True
self.check_hidden_states_output(__lowercase , __lowercase , __lowercase , (padded_height, padded_width) )
@slow
def __magic_name__ ( self : Optional[int] ) -> Any:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : List[Any] =FocalNetModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def __magic_name__ ( self : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : str =_config_zero_init(__lowercase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(config=__lowercase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : str ) -> int:
# TODO update organization
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None
@slow
def __magic_name__ ( self : Tuple ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[int] =FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =self.default_image_processor
SCREAMING_SNAKE_CASE__ : Tuple =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] =image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] =model(**__lowercase )
# verify the logits
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =torch.tensor([0.2166, -0.4368, 0.2191] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
snake_case_ = (FocalNetBackbone,) if is_torch_available() else ()
snake_case_ = FocalNetConfig
snake_case_ = False
def __magic_name__ ( self : List[Any] ) -> str:
SCREAMING_SNAKE_CASE__ : Any =FocalNetModelTester(self )
| 715 |
'''simple docstring'''
def _a( a : str, b : str ):
    '''simple docstring'''
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
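    # Editor's illustration (example strings are hypothetical, not from the
    # original file): `_a(a, b)` asks whether `a` can become `b` by capitalizing
    # some lowercase letters and deleting the remaining lowercase ones.
    assert _a('''daBcd''', '''ABC''' )      # capitalize 'a' and 'c', drop the 'd's
    assert not _a('''dBcd''', '''ABC''' )   # no 'a' is available to capitalize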
| 665 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def _a( inductance : float, capacitance : float ):
'''simple docstring'''
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
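    # Worked example (editor's illustration): a 10 mH inductor with a 10 µF
    # capacitor resonates at f = 1 / (2*pi*sqrt(L*C)) ≈ 503.3 Hz.
    print(_a(10e-3, 10e-6))  # ('Resonant frequency', 503.292...)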
| 716 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , __lowercase : Optional[Any] , __lowercase : List[str]=13 , __lowercase : int=7 , __lowercase : Tuple=True , __lowercase : Optional[Any]=True , __lowercase : List[Any]=True , __lowercase : str=True , __lowercase : Tuple=99 , __lowercase : Union[str, Any]=64 , __lowercase : Tuple=32 , __lowercase : Optional[Any]=5 , __lowercase : Tuple=4 , __lowercase : Optional[int]=37 , __lowercase : int="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : List[Any]=0.1 , __lowercase : List[Any]=5_12 , __lowercase : int=16 , __lowercase : Optional[int]=2 , __lowercase : Tuple=0.02 , __lowercase : List[str]=3 , __lowercase : List[str]=4 , __lowercase : List[str]=None , ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =parent
SCREAMING_SNAKE_CASE__ : Any =batch_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =seq_length
SCREAMING_SNAKE_CASE__ : Dict =is_training
SCREAMING_SNAKE_CASE__ : List[Any] =use_input_mask
SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_token_type_ids
SCREAMING_SNAKE_CASE__ : List[Any] =use_labels
SCREAMING_SNAKE_CASE__ : int =vocab_size
SCREAMING_SNAKE_CASE__ : str =hidden_size
SCREAMING_SNAKE_CASE__ : Any =embedding_size
SCREAMING_SNAKE_CASE__ : Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE__ : str =num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : str =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Any =max_position_embeddings
SCREAMING_SNAKE_CASE__ : Optional[Any] =type_vocab_size
SCREAMING_SNAKE_CASE__ : Any =type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =initializer_range
SCREAMING_SNAKE_CASE__ : str =num_labels
SCREAMING_SNAKE_CASE__ : List[str] =num_choices
SCREAMING_SNAKE_CASE__ : List[str] =scope
def __magic_name__ ( self : str ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : int =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : int =None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =None
SCREAMING_SNAKE_CASE__ : Optional[Any] =None
SCREAMING_SNAKE_CASE__ : Optional[int] =None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Tuple =ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self : List[str] ) -> Any:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
def __magic_name__ ( self : Any , __lowercase : Tuple , __lowercase : Any , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : Dict , __lowercase : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertModel(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(__lowercase , token_type_ids=__lowercase )
SCREAMING_SNAKE_CASE__ : str =model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__ ( self : Dict , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : Any , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : str =MegatronBertForMaskedLM(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : List[str] , __lowercase : Tuple , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : str , __lowercase : Any , __lowercase : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =MegatronBertForNextSentencePrediction(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : List[str] , __lowercase : Optional[int] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[Any] , __lowercase : str , __lowercase : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForPreTraining(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , next_sentence_label=__lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : Optional[int] , __lowercase : Tuple , __lowercase : Optional[int] , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any , __lowercase : Any , __lowercase : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] =MegatronBertForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : int , __lowercase : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.num_labels
SCREAMING_SNAKE_CASE__ : Dict =MegatronBertForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : int ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Tuple =self.num_labels
SCREAMING_SNAKE_CASE__ : int =MegatronBertForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : Dict , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Any , __lowercase : List[Any] , __lowercase : int , __lowercase : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : int =self.num_choices
SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertForMultipleChoice(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[int] =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Tuple =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self : str ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
snake_case_ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = True
# test_resize_embeddings = False
snake_case_ = False
def __magic_name__ ( self : List[Any] , __lowercase : List[str] , __lowercase : Tuple , __lowercase : Tuple=False ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
if return_labels:
if model_class in get_values(__lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] =torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
return inputs_dict
def __magic_name__ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =MegatronBertModelTester(self )
SCREAMING_SNAKE_CASE__ : Tuple =ConfigTester(self , config_class=__lowercase , hidden_size=37 )
def __magic_name__ ( self : str ) -> Dict:
self.config_tester.run_common_tests()
def __magic_name__ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__lowercase )
def __magic_name__ ( self : List[str] ) -> Any:
SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__lowercase )
def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__lowercase )
def __magic_name__ ( self : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__lowercase )
def __magic_name__ ( self : Dict ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__lowercase )
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__lowercase )
def __magic_name__ ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__lowercase )
def _a( UpperCamelCase__ : List[str] ):
'''simple docstring'''
return torch.tensor(
UpperCamelCase__, dtype=torch.long, device=UpperCamelCase__, )
a_ = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
@unittest.skip('''Model is not available.''' )
def __magic_name__ ( self : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Any ='''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
SCREAMING_SNAKE_CASE__ : Optional[int] =os.path.join(os.environ['''MYDIR'''] , __lowercase )
SCREAMING_SNAKE_CASE__ : Any =MegatronBertModel.from_pretrained(__lowercase )
model.to(__lowercase )
model.half()
SCREAMING_SNAKE_CASE__ : Dict =_long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase )[0]
SCREAMING_SNAKE_CASE__ : Dict =torch.Size((1, 9, 10_24) )
self.assertEqual(output.shape , __lowercase )
SCREAMING_SNAKE_CASE__ : str =[-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = '''ii={} jj={} a={} b={}'''.format(ii , jj , a , b )
                self.assertTrue(math.isclose(a , b , rel_tol=a_ , abs_tol=a_ ) , msg=msg )
| 665 | 0 |
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class __SCREAMING_SNAKE_CASE :
def __init__( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ : Dict =''''''
SCREAMING_SNAKE_CASE__ : Optional[int] =''''''
SCREAMING_SNAKE_CASE__ : Optional[Any] =[]
SCREAMING_SNAKE_CASE__ : Any =0
SCREAMING_SNAKE_CASE__ : int =2_56
SCREAMING_SNAKE_CASE__ : str =0
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : List[str] =0
SCREAMING_SNAKE_CASE__ : List[str] =0
def __magic_name__ ( self : Dict , __lowercase : int ) -> int:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =cva.imread(lowerCAmelCase_ , 0 )
SCREAMING_SNAKE_CASE__ : str =copy.deepcopy(self.img )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='''x''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] =np.sum(lowerCAmelCase_ )
for i in range(len(lowerCAmelCase_ ) ):
SCREAMING_SNAKE_CASE__ : int =x[i] / self.k
self.sk += prk
SCREAMING_SNAKE_CASE__ : Tuple =(self.L - 1) * self.sk
if self.rem != 0:
SCREAMING_SNAKE_CASE__ : List[Any] =int(last % last )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =int(np.ma.count(self.img ) / self.img[1].size )
SCREAMING_SNAKE_CASE__ : Tuple =self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
SCREAMING_SNAKE_CASE__ : List[Any] =self.img[j][i]
if num != self.last_list[num]:
SCREAMING_SNAKE_CASE__ : List[Any] =self.last_list[num]
cva.imwrite('''output_data/output.jpg''' , self.img )
def __magic_name__ ( self : Union[str, Any] ) -> Union[str, Any]:
plt.hist(self.img.ravel() , 2_56 , [0, 2_56] )
def __magic_name__ ( self : int ) -> Tuple:
cva.imshow('''Output-Image''' , self.img )
cva.imshow('''Input-Image''' , self.original_image )
cva.waitKey(50_00 )
cva.destroyAllWindows()
if __name__ == "__main__":
a_ = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
a_ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
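    # The mapping built in `stretch` above is classical histogram equalization:
    # each input level k maps to s_k = round((L - 1) * CDF(k)). Equivalent numpy
    # sketch (editor's illustration, independent of the class above):
    #     hist, _ = np.histogram(img.ravel(), 2_56, [0, 2_56])
    #     cdf = hist.cumsum() / hist.sum()
    #     lut = np.round(2_55 * cdf).astype('uint8')
    #     equalized = lut[img]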
| 717 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE__ : Optional[Any] =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
SCREAMING_SNAKE_CASE__ : Dict =os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def __magic_name__ ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Any =F"\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n ".split()
SCREAMING_SNAKE_CASE__ : List[str] =[sys.executable] + distributed_args
execute_subprocess_async(__lowercase , env=os.environ.copy() )
| 665 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
a_ = logging.get_logger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self : Union[str, Any] , **__lowercase : Optional[int] ) -> int:
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
SCREAMING_SNAKE_CASE__ : List[str] =deprecated_arg[3:]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =not kwargs.pop(UpperCAmelCase__ )
logger.warning(
F"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
F" {positive_arg}={kwargs[positive_arg]}" )
SCREAMING_SNAKE_CASE__ : Optional[int] =kwargs.pop('''tpu_name''' , self.tpu_name )
SCREAMING_SNAKE_CASE__ : List[Any] =kwargs.pop('''device_idx''' , self.device_idx )
SCREAMING_SNAKE_CASE__ : str =kwargs.pop('''eager_mode''' , self.eager_mode )
SCREAMING_SNAKE_CASE__ : Optional[Any] =kwargs.pop('''use_xla''' , self.use_xla )
super().__init__(**UpperCAmelCase__ )
snake_case_ = field(
default=lowerCamelCase , metadata={"""help""": """Name of TPU"""} , )
snake_case_ = field(
default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , )
snake_case_ = field(default=lowerCamelCase , metadata={"""help""": """Benchmark models in eager model."""} )
snake_case_ = field(
default=lowerCamelCase , metadata={
"""help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."""
} , )
@cached_property
def __magic_name__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''tf'''] )
SCREAMING_SNAKE_CASE__ : List[Any] =None
if self.tpu:
try:
if self.tpu_name:
SCREAMING_SNAKE_CASE__ : Optional[int] =tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
SCREAMING_SNAKE_CASE__ : str =None
return tpu
@cached_property
def __magic_name__ ( self : Dict ) -> str:
'''simple docstring'''
requires_backends(self , ['''tf'''] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
SCREAMING_SNAKE_CASE__ : Tuple =tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' )
SCREAMING_SNAKE_CASE__ : List[Any] =tf.distribute.OneDeviceStrategy(device=F"/gpu:{self.device_idx}" )
else:
tf.config.set_visible_devices([] , '''GPU''' ) # disable GPU
SCREAMING_SNAKE_CASE__ : Union[str, Any] =tf.distribute.OneDeviceStrategy(device=F"/cpu:{self.device_idx}" )
return strategy
@property
def __magic_name__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''tf'''] )
return self._setup_tpu is not None
@property
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''tf'''] )
return self._setup_strategy
@property
def __magic_name__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''tf'''] )
return tf.config.list_physical_devices('''GPU''' )
@property
def __magic_name__ ( self : List[str] ) -> Any:
'''simple docstring'''
requires_backends(self , ['''tf'''] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def __magic_name__ ( self : int ) -> str:
'''simple docstring'''
return self.n_gpu > 0
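# Sketch of the deprecated-flag translation performed in __init__ above
# (editor's illustration; standalone helper, not part of the dataclass):
def _translate_deprecated(kwargs , deprecated_args ):
    for deprecated_arg in deprecated_args:
        if deprecated_arg in kwargs:
            positive_arg = deprecated_arg[3:]  # strip the leading 'no_'
            kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
    return kwargs
# _translate_deprecated({'no_cuda': True} , ['no_cuda'] )  # -> {'cuda': False}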
| 718 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = ShapEImgaImgPipeline
snake_case_ = ["""image"""]
snake_case_ = ["""image"""]
snake_case_ = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
snake_case_ = False
@property
def __magic_name__ ( self : List[Any] ) -> List[Any]:
return 32
@property
def __magic_name__ ( self : List[str] ) -> Optional[int]:
return 32
@property
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Dict ) -> Union[str, Any]:
return 8
@property
def __magic_name__ ( self : Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
SCREAMING_SNAKE_CASE__ : str =CLIPVisionModel(__lowercase )
return model
@property
def __magic_name__ ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ : int =CLIPImageProcessor(
crop_size=2_24 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , )
return image_processor
@property
def __magic_name__ ( self : List[str] ) -> Dict:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : str ={
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
SCREAMING_SNAKE_CASE__ : str =PriorTransformer(**__lowercase )
return model
@property
def __magic_name__ ( self : Tuple ) -> List[str]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE__ : str =ShapERenderer(**__lowercase )
return model
def __magic_name__ ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int =self.dummy_prior
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_encoder
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_processor
SCREAMING_SNAKE_CASE__ : Tuple =self.dummy_renderer
SCREAMING_SNAKE_CASE__ : int =HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=__lowercase , clip_sample=__lowercase , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE__ : Any ={
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __magic_name__ ( self : Any , __lowercase : List[str] , __lowercase : Any=0 ) -> Any:
SCREAMING_SNAKE_CASE__ : int =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
if str(__lowercase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ : List[str] =torch.manual_seed(__lowercase )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
SCREAMING_SNAKE_CASE__ : Any ={
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def __magic_name__ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ : int ='''cpu'''
SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : str =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Any =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =output.images[0]
SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : List[Any] =np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self : List[Any] ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __magic_name__ ( self : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch_device == '''cpu'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__lowercase , relax_max_difference=__lowercase , )
def __magic_name__ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =1
SCREAMING_SNAKE_CASE__ : List[str] =2
SCREAMING_SNAKE_CASE__ : Dict =self.get_dummy_inputs(__lowercase )
for key in inputs.keys():
if key in self.batch_params:
SCREAMING_SNAKE_CASE__ : Tuple =batch_size * [inputs[key]]
SCREAMING_SNAKE_CASE__ : List[Any] =pipe(**__lowercase , num_images_per_prompt=__lowercase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : Optional[Any] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : List[str] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
SCREAMING_SNAKE_CASE__ : Dict =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
SCREAMING_SNAKE_CASE__ : List[Any] =ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
SCREAMING_SNAKE_CASE__ : Tuple =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple =pipe(
__lowercase , generator=__lowercase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__lowercase , __lowercase )
| 665 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_dpt"""] = ["""DPTFeatureExtractor"""]
    _import_structure["""image_processing_dpt"""] = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_dpt"""] = [
        """DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """DPTForDepthEstimation""",
        """DPTForSemanticSegmentation""",
        """DPTModel""",
        """DPTPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
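# With the lazy structure above, importing the package stays cheap: heavy
# submodules load only on first attribute access. Usage sketch (assumption:
# torch and the vision extras are installed; 'Intel/dpt-large' is one public
# checkpoint):
#     from transformers import DPTImageProcessor, DPTForDepthEstimation
#     processor = DPTImageProcessor.from_pretrained('Intel/dpt-large')
#     model = DPTForDepthEstimation.from_pretrained('Intel/dpt-large')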
| 719 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """gpt_bigcode"""
snake_case_ = ["""past_key_values"""]
snake_case_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self : Any , vocab_size : Any=5_02_57 , n_positions : int=10_24 , n_embd : List[str]=7_68 , n_layer : Optional[int]=12 , n_head : Dict=12 , n_inner : List[str]=None , activation_function : int="gelu_pytorch_tanh" , resid_pdrop : Union[str, Any]=0.1 , embd_pdrop : Optional[int]=0.1 , attn_pdrop : Optional[int]=0.1 , layer_norm_epsilon : Optional[Any]=1e-5 , initializer_range : List[str]=0.02 , scale_attn_weights : Tuple=True , use_cache : Optional[Any]=True , bos_token_id : Union[str, Any]=5_02_56 , eos_token_id : List[Any]=5_02_56 , attention_softmax_in_fp32 : Union[str, Any]=True , scale_attention_softmax_in_fp32 : List[str]=True , multi_query : Dict=True , **kwargs : List[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =n_positions
SCREAMING_SNAKE_CASE__ : Dict =n_embd
SCREAMING_SNAKE_CASE__ : Dict =n_layer
SCREAMING_SNAKE_CASE__ : Union[str, Any] =n_head
SCREAMING_SNAKE_CASE__ : List[str] =n_inner
SCREAMING_SNAKE_CASE__ : List[str] =activation_function
SCREAMING_SNAKE_CASE__ : List[Any] =resid_pdrop
SCREAMING_SNAKE_CASE__ : List[Any] =embd_pdrop
SCREAMING_SNAKE_CASE__ : List[str] =attn_pdrop
SCREAMING_SNAKE_CASE__ : Dict =layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : List[str] =initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] =scale_attn_weights
SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_cache
        SCREAMING_SNAKE_CASE__ : Dict =attention_softmax_in_fp32
        SCREAMING_SNAKE_CASE__ : int =scale_attention_softmax_in_fp32
SCREAMING_SNAKE_CASE__ : Dict =multi_query
SCREAMING_SNAKE_CASE__ : Optional[Any] =bos_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] =eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
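# Usage sketch (editor's illustration; upstream this class is exposed as
# transformers.GPTBigCodeConfig):
#     config = __SCREAMING_SNAKE_CASE(n_embd=7_68 , n_layer=12 , multi_query=True )
#     config.hidden_size        # -> 768 via the attribute_map alias
#     config.num_hidden_layers  # -> 12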
| 665 | 0 |
'''simple docstring'''
def validate_initial_digits( credit_card_number : str ):
    '''simple docstring'''
    return credit_card_number.startswith(('''34''', '''35''', '''37''', '''4''', '''5''', '''6''') )
def luhn_validation( credit_card_number : str ):
    '''simple docstring'''
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number ) - 2
    for i in range(half_len, -1, -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 1_0
            digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number ) - 1, -1, -2 ):
        total += int(cc_number[i] )
    return total % 1_0 == 0
def validate_credit_card_number( credit_card_number : str ):
    '''simple docstring'''
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters." )
        return False
    if not 1_3 <= len(credit_card_number ) <= 1_6:
        print(f"{error_message} of its length." )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(f"{error_message} of its first two digits." )
        return False
    if not luhn_validation(credit_card_number ):
        print(f"{error_message} it fails the Luhn check." )
        return False
return False
print(f"{credit_card_number} is a valid credit card number." )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
| 720 |
'''simple docstring'''
class __SCREAMING_SNAKE_CASE :
    def __init__( self : List[Any] , size : int ) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size
    @staticmethod
    def get_next( index : int ) -> int:
        return index | (index + 1)
    @staticmethod
    def get_prev( index : int ) -> int:
        return (index & (index + 1)) - 1
    def update( self : Dict , index : int , value : int ) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index ) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # recompute this node from the new value and its stored maximum
                self.tree[index] = max(value , self.tree[index] )
            index = self.get_next(index )
    def query( self : Optional[int] , left : int , right : int ) -> int:
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right )
            if left <= current_left:
                result = max(result , self.tree[right] )
                right = current_left
            else:
                result = max(result , self.arr[right] )
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
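    # Usage sketch (editor's illustration) -- a max-Fenwick tree over 5 slots:
    #     tree = __SCREAMING_SNAKE_CASE(5)   # upstream name: MaxFenwickTree
    #     tree.update(2 , 7 )
    #     tree.update(4 , 3 )
    #     tree.query(0 , 5 )  # -> 7, the maximum over the half-open range [0, 5)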
| 665 | 0 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol ):
    def process( self : List[Any] , sample : float ) -> float:
        return 0.0
def get_bounds( fft_results : np.ndarray, samplerate : int ):
    '''simple docstring'''
    lowest = min([-2_0, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([2_0, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest
def show_frequency_response( filter_type : FilterType, samplerate : int ):
    '''simple docstring'''
    size = 5_1_2
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 2_0 * np.log10(fft_out )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(2_4, samplerate / 2 - 1 )
    plt.xlabel('''Frequency (Hz)''' )
    plt.xscale('''log''' )
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate )
    plt.ylim(max([-8_0, bounds[0]] ), min([8_0, bounds[1]] ) )
    plt.ylabel('''Gain (dB)''' )
    plt.plot(fft_db )
    plt.show()
def show_phase_response( filter_type : FilterType, samplerate : int ):
    '''simple docstring'''
    size = 5_1_2
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(2_4, samplerate / 2 - 1 )
    plt.xlabel('''Frequency (Hz)''' )
    plt.xscale('''log''' )
    plt.ylim(-2 * pi, 2 * pi )
    plt.ylabel('''Phase shift (Radians)''' )
    plt.plot(np.unwrap(phase, -2 * pi ) )
    plt.show()
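# Minimal filter satisfying the protocol above (editor's illustration):
# a two-tap averager y[n] = (x[n] + x[n-1]) / 2, which acts as a gentle low-pass.
class AverageFilter:
    def __init__( self ) -> None:
        self.prev = 0.0
    def process( self , sample : float ) -> float:
        out = (sample + self.prev) / 2
        self.prev = sample
        return out
# show_frequency_response(AverageFilter() , 48_000 )  # gain rolls off toward Nyquist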
| 721 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = ["""vqvae"""]
def __init__( self : int , __lowercase : AutoencoderKL , __lowercase : UNetaDConditionModel , __lowercase : Mel , __lowercase : Union[DDIMScheduler, DDPMScheduler] , ) -> int:
super().__init__()
self.register_modules(unet=__lowercase , scheduler=__lowercase , mel=__lowercase , vqvae=__lowercase )
def __magic_name__ ( self : List[str] ) -> int:
return 50 if isinstance(self.scheduler , __lowercase ) else 10_00
@torch.no_grad()
def __call__( self : Dict , __lowercase : int = 1 , __lowercase : str = None , __lowercase : np.ndarray = None , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = None , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : float = 0 , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : torch.Tensor = None , __lowercase : torch.Tensor = None , __lowercase : Dict=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =steps or self.get_default_steps()
self.scheduler.set_timesteps(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
SCREAMING_SNAKE_CASE__ : Optional[int] =randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__lowercase , device=self.device , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =noise
SCREAMING_SNAKE_CASE__ : List[str] =None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Dict =self.mel.audio_slice_to_image(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
SCREAMING_SNAKE_CASE__ : int =(input_image / 2_55) * 2 - 1
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.vqvae.encode(torch.unsqueeze(__lowercase , 0 ) ).latent_dist.sample(
generator=__lowercase )[0]
SCREAMING_SNAKE_CASE__ : int =self.vqvae.config.scaling_factor * input_images
if start_step > 0:
SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , self.scheduler.timesteps[start_step - 1] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
SCREAMING_SNAKE_CASE__ : Optional[Any] =int(mask_start_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : Tuple =int(mask_end_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __lowercase ):
SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase , __lowercase )['''sample''']
else:
SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase )['''sample''']
if isinstance(self.scheduler , __lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.scheduler.step(
model_output=__lowercase , timestep=__lowercase , sample=__lowercase , eta=__lowercase , generator=__lowercase , )['''prev_sample''']
else:
SCREAMING_SNAKE_CASE__ : Any =self.scheduler.step(
model_output=__lowercase , timestep=__lowercase , sample=__lowercase , generator=__lowercase , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] =mask[:, step, :, :mask_start]
if mask_end > 0:
SCREAMING_SNAKE_CASE__ : List[str] =mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
SCREAMING_SNAKE_CASE__ : str =1 / self.vqvae.config.scaling_factor * images
SCREAMING_SNAKE_CASE__ : int =self.vqvae.decode(__lowercase )['''sample''']
SCREAMING_SNAKE_CASE__ : List[str] =(images / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
SCREAMING_SNAKE_CASE__ : Dict =(images * 2_55).round().astype('''uint8''' )
SCREAMING_SNAKE_CASE__ : Any =list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(__lowercase , mode='''RGB''' ).convert('''L''' ) for _ in images) )
SCREAMING_SNAKE_CASE__ : Optional[int] =[self.mel.image_to_audio(__lowercase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowercase ) )
@torch.no_grad()
    def __magic_name__ ( self : Optional[int] , images : List[Image.Image] , steps : int = 50 ) -> np.ndarray:
        assert isinstance(self.scheduler , DDIMScheduler )
        self.scheduler.set_timesteps(steps )
        sample = np.array(
            [np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
        sample = (sample / 2_55) * 2 - 1
        sample = torch.Tensor(sample ).to(self.device )
        # reverse (DDIM inversion) pass: walk the timesteps from clean to noisy
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample , t )['''sample''']
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
@staticmethod
    def __magic_name__ ( x0 : torch.Tensor , x1 : torch.Tensor , alpha : float ) -> torch.Tensor:
        # spherical linear interpolation between x0 and x1
        theta = acos(torch.dot(torch.flatten(x0 ) , torch.flatten(x1 ) ) / torch.norm(x0 ) / torch.norm(x1 ) )
        return sin((1 - alpha) * theta ) * x0 / sin(theta ) + sin(alpha * theta ) * x1 / sin(theta )
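# Aside -- a standalone sketch (not part of the pipeline class above): the
# static method is spherical linear interpolation (slerp) between two tensors.
# Only torch and math are assumed; the names below are illustrative.
from math import acos, sin
import torch
def slerp_demo(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    # angle between the two tensors, treated as flattened vectors
    theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
# the endpoints are recovered exactly at alpha=0 and alpha=1
a, b = torch.tensor([1.0, 0.0]), torch.tensor([0.0, 1.0])
assert torch.allclose(slerp_demo(a, b, 0.0), a)
assert torch.allclose(slerp_demo(a, b, 1.0), b)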
| 665 | 0 |
def solution( max_perimeter : int = 1_0**9 ):
    '''simple docstring'''
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F'''{solution() = }''')
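# Hedged cross-check: the recurrence above appears to enumerate the
# "almost equilateral" triangles (a, a, a±1) with integral area from
# Project Euler problem 94. A brute-force version for small limits, using
# Heron's formula in the form (4 * area)**2 = (2a + b) * b * b * (2a - b):
from math import isqrt
def solution_bruteforce(max_perimeter: int = 1_000) -> int:
    total = 0
    for a in range(2, max_perimeter // 3 + 2):
        for b in (a - 1, a + 1):  # the third side differs from a by exactly one
            perimeter = 2 * a + b
            if perimeter > max_perimeter:
                continue
            sq = (2 * a + b) * b * b * (2 * a - b)
            root = isqrt(sq)
            # an integral area requires sq to be a perfect square with root % 4 == 0
            if root > 0 and root * root == sq and root % 4 == 0:
                total += perimeter
    return total
assert solution_bruteforce(1_000) == solution(1_000) == 984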
| 700 |
'''simple docstring'''
from math import isqrt
def calculate_prime_numbers( max_number : int ):
    '''simple docstring'''
    # sieve of Eratosthenes up to (but excluding) max_number
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2, max_number, i ):
                is_prime[j] = False
    return [i for i in range(2, max_number ) if is_prime[i]]
def solution( max_number : int = 1_0**8 ):
    '''simple docstring'''
    prime_numbers = calculate_prime_numbers(max_number // 2 )
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
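# Hedged verification sketch: the two-pointer pass above counts the composites
# below max_number with exactly two prime factors (semiprimes), as in Project
# Euler problem 187. A direct check that is feasible for small limits:
def semiprimes_bruteforce(max_number: int) -> int:
    primes = calculate_prime_numbers(max_number)
    prime_set = set(primes)
    count = 0
    for n in range(4, max_number):
        for p in primes:
            if p * p > n:
                break  # no prime factor at most sqrt(n): n is prime, not a semiprime
            if n % p == 0:
                # n = p * (n // p) is a semiprime iff the cofactor is prime
                count += (n // p) in prime_set
                break
    return count
assert semiprimes_bruteforce(100) == solution(100) == 34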
| 665 | 0 |
'''simple docstring'''
def decimal_to_fraction( decimal ):
    '''simple docstring'''
    try:
        decimal = float(decimal )
    except ValueError:
        raise ValueError('''Please enter a valid number''' )
    fractional_part = decimal - int(decimal )
    if fractional_part == 0:
        return int(decimal ), 1
    else:
        number_of_frac_digits = len(str(decimal ).split('''.''' )[1] )
        numerator = int(decimal * (1_0**number_of_frac_digits) )
        denominator = 1_0**number_of_frac_digits
        # reduce by the greatest common divisor (Euclidean algorithm)
        divisor , dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend , divisor = divisor, remainder
        numerator , denominator = numerator // divisor, denominator // divisor
        return numerator, denominator
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction('67') = }''')
print(F'''{decimal_to_fraction('45.0') = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction('6.25') = }''')
print(F'''{decimal_to_fraction('78td') = }''')
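# Worked example (sketch): 1.5 has one fractional digit, so it first becomes
# 15/10; the while loop above is the Euclidean algorithm, gcd(15, 10) = 5,
# and dividing through gives the reduced fraction 3/2. The standard library
# agrees. (Note the final print above raises ValueError by design, so this
# check only runs when the module is imported rather than executed.)
from fractions import Fraction
assert decimal_to_fraction(1.5) == (3, 2)
assert Fraction('1.5') == Fraction(3, 2)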
| 701 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
    snake_case_ = SpeechT5Tokenizer
snake_case_ = False
snake_case_ = True
def __magic_name__ ( self : int ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB )
        mask_token = AddedToken('''<mask>''' , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self : Dict , __lowercase : int ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] ='''this is a test'''
SCREAMING_SNAKE_CASE__ : int ='''this is a test'''
return input_text, output_text
def __magic_name__ ( self : List[Any] , __lowercase : int , __lowercase : Optional[Any]=False , __lowercase : Union[str, Any]=20 , __lowercase : Any=5 ) -> Any:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.get_input_output_texts(__lowercase )
SCREAMING_SNAKE_CASE__ : str =tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
SCREAMING_SNAKE_CASE__ : str =tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
return text, ids
def __magic_name__ ( self : Dict ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[int] ='''<pad>'''
SCREAMING_SNAKE_CASE__ : Optional[int] =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def __magic_name__ ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-4] , '''œ''' )
self.assertEqual(vocab_keys[-2] , '''<mask>''' )
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
self.assertEqual(len(__lowercase ) , 81 )
def __magic_name__ ( self : Dict ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def __magic_name__ ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE__ : str =self.get_tokenizers(do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Any =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
SCREAMING_SNAKE_CASE__ : int =['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.add_tokens(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , len(__lowercase ) )
self.assertEqual(__lowercase , all_size + len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__lowercase )
self.assertGreaterEqual(len(__lowercase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
SCREAMING_SNAKE_CASE__ : str ={'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
SCREAMING_SNAKE_CASE__ : int =tokenizer.add_special_tokens(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : int =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , len(__lowercase ) )
self.assertEqual(__lowercase , all_size_a + len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__lowercase )
self.assertGreaterEqual(len(__lowercase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def __magic_name__ ( self : Optional[Any] ) -> Any:
pass
def __magic_name__ ( self : List[str] ) -> List[Any]:
pass
def __magic_name__ ( self : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Dict =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.tokenize('''This is a test''' )
# fmt: off
self.assertListEqual(__lowercase , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(__lowercase )
# fmt: off
self.assertListEqual(__lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.convert_ids_to_tokens(__lowercase )
self.assertListEqual(
__lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
def __magic_name__ ( self : List[str] ) -> List[str]:
# Use custom sequence because this tokenizer does not handle numbers.
SCREAMING_SNAKE_CASE__ : List[Any] =[
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
SCREAMING_SNAKE_CASE__ : str ={
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__lowercase , )
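# Aside (standalone sketch): the expected tokens above are character-level,
# with SPIECE_UNDERLINE ('▁') marking word boundaries, so the first expected
# sequence can be reproduced with plain string handling:
demo_tokens = list((SPIECE_UNDERLINE + 'This is a test').replace(' ', SPIECE_UNDERLINE))
assert demo_tokens == [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't']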
| 665 | 0 |
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def config( *args , **kwargs ):
    '''simple docstring'''
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer( *args , **kwargs ):
    '''simple docstring'''
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model( *args , **kwargs ):
    '''simple docstring'''
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM( *args , **kwargs ):
    '''simple docstring'''
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM( *args , **kwargs ):
    '''simple docstring'''
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification( *args , **kwargs ):
    '''simple docstring'''
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering( *args , **kwargs ):
    '''simple docstring'''
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
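# Usage sketch: with this file installed as hubconf.py, the entry points above
# become loadable through torch.hub. The repo string and model name below are
# assumptions for illustration and need network access, so they stay commented:
# import torch
# tok = torch.hub.load('huggingface/transformers', 'tokenizer', 'bert-base-cased')
# mdl = torch.hub.load('huggingface/transformers', 'model', 'bert-base-cased')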
| 702 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , __lowercase : Optional[int] , __lowercase : str=13 , __lowercase : int=10 , __lowercase : List[Any]=3 , __lowercase : List[str]=2 , __lowercase : int=2 , __lowercase : Dict=True , __lowercase : Optional[Any]=True , __lowercase : int=32 , __lowercase : List[Any]=5 , __lowercase : Union[str, Any]=4 , __lowercase : Any=37 , __lowercase : Optional[Any]="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : Tuple=0.1 , __lowercase : Dict=10 , __lowercase : int=0.02 , __lowercase : str="divided_space_time" , __lowercase : Union[str, Any]=None , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : int =parent
SCREAMING_SNAKE_CASE__ : List[str] =batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_size
SCREAMING_SNAKE_CASE__ : List[Any] =num_channels
SCREAMING_SNAKE_CASE__ : int =patch_size
SCREAMING_SNAKE_CASE__ : Tuple =num_frames
SCREAMING_SNAKE_CASE__ : List[Any] =is_training
SCREAMING_SNAKE_CASE__ : List[str] =use_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size
SCREAMING_SNAKE_CASE__ : str =num_hidden_layers
SCREAMING_SNAKE_CASE__ : int =num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] =hidden_act
SCREAMING_SNAKE_CASE__ : Dict =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] =attention_type
SCREAMING_SNAKE_CASE__ : Union[str, Any] =initializer_range
SCREAMING_SNAKE_CASE__ : Any =scope
SCREAMING_SNAKE_CASE__ : int =num_labels
        # in TimeSformer, the sequence length equals num_frames * num_patches_per_frame + 1 (for the CLS token)
SCREAMING_SNAKE_CASE__ : List[str] =(image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : str =(num_frames) * self.num_patches_per_frame + 1
def __magic_name__ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[Any] =None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : int =self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : int ) -> Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
SCREAMING_SNAKE_CASE__ : List[Any] =self.num_labels
return config
def __magic_name__ ( self : int , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[Any] ) -> int:
SCREAMING_SNAKE_CASE__ : Tuple =TimesformerModel(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str =TimesformerForVideoClassification(__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase )
# verify the logits shape
SCREAMING_SNAKE_CASE__ : Tuple =torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __lowercase )
def __magic_name__ ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =config_and_inputs
SCREAMING_SNAKE_CASE__ : Any ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
snake_case_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
snake_case_ = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def __magic_name__ ( self : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Dict =TimesformerModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] =ConfigTester(
self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 )
def __magic_name__ ( self : str , __lowercase : Dict , __lowercase : str , __lowercase : Optional[int]=False ) -> int:
SCREAMING_SNAKE_CASE__ : str =copy.deepcopy(__lowercase )
if return_labels:
if model_class in get_values(__lowercase ):
SCREAMING_SNAKE_CASE__ : Any =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
return inputs_dict
def __magic_name__ ( self : List[Any] ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='''TimeSformer does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ) -> Optional[int]:
pass
def __magic_name__ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str =model_class(__lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Any =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) )
def __magic_name__ ( self : Any ) -> Any:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase )
SCREAMING_SNAKE_CASE__ : str =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : List[str] =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Dict =['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def __magic_name__ ( self : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__lowercase )
@slow
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> List[str]:
if not self.has_attentions:
pass
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] =True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.seq_length
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.num_frames
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
SCREAMING_SNAKE_CASE__ : str =False
SCREAMING_SNAKE_CASE__ : Tuple =True
SCREAMING_SNAKE_CASE__ : Dict =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE__ : List[Any] =True
SCREAMING_SNAKE_CASE__ : List[str] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE__ : Optional[int] =True
SCREAMING_SNAKE_CASE__ : Union[str, Any] =True
SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) )
self.assertEqual(out_len + 1 , len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : List[str] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def __magic_name__ ( self : Tuple ) -> List[Any]:
def check_hidden_states_output(__lowercase : Tuple , __lowercase : Dict , __lowercase : Optional[int] ):
SCREAMING_SNAKE_CASE__ : List[Any] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : int =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__lowercase ) , __lowercase )
SCREAMING_SNAKE_CASE__ : int =self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Tuple =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : List[str] =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''', filename='''eating_spaghetti.npy''', repo_type='''dataset''' )
SCREAMING_SNAKE_CASE__ : Any =np.load(UpperCamelCase__ )
return list(UpperCamelCase__ )
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : Any ) -> List[str]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : int =TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to(
__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =self.default_image_processor
SCREAMING_SNAKE_CASE__ : Tuple =prepare_video()
SCREAMING_SNAKE_CASE__ : Any =image_processor(video[:8] , return_tensors='''pt''' ).to(__lowercase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] =model(**__lowercase )
# verify the logits
SCREAMING_SNAKE_CASE__ : List[str] =torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , __lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 ) )
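# Quick arithmetic check of the sequence-length formula used by the tester
# above (the numbers mirror its defaults, which is an assumption of this sketch):
demo_image_size, demo_patch_size, demo_num_frames = 10, 2, 2
demo_patches_per_frame = (demo_image_size // demo_patch_size) ** 2  # 25 patches per frame
demo_seq_length = demo_num_frames * demo_patches_per_frame + 1      # plus the CLS token
assert (demo_patches_per_frame, demo_seq_length) == (25, 51)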
| 665 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
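# Minimal illustration of the lazy-import pattern used above (standalone and
# not Electra-specific; the module and attribute names are placeholders):
# submodules are imported only when one of their exported names is accessed.
import importlib
import types
class _LazyDemo(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
    def __getattr__(self, attr):
        # resolve the attribute by importing the submodule that exports it
        for module_name, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(module_name), attr)
        raise AttributeError(attr)
lazy = _LazyDemo('demo', {'json': ['dumps', 'loads']})
assert lazy.dumps({'a': 1}) == '{"a": 1}'  # json is only imported on this access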
| 703 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
a_ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 5_1_2,
'bert-large-uncased': 5_1_2,
'bert-base-cased': 5_1_2,
'bert-large-cased': 5_1_2,
'bert-base-multilingual-uncased': 5_1_2,
'bert-base-multilingual-cased': 5_1_2,
'bert-base-chinese': 5_1_2,
'bert-base-german-cased': 5_1_2,
'bert-large-uncased-whole-word-masking': 5_1_2,
'bert-large-cased-whole-word-masking': 5_1_2,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-base-cased-finetuned-mrpc': 5_1_2,
'bert-base-german-dbmdz-cased': 5_1_2,
'bert-base-german-dbmdz-uncased': 5_1_2,
'TurkuNLP/bert-base-finnish-cased-v1': 5_1_2,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_1_2,
'wietsedv/bert-base-dutch-cased': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_INIT_CONFIGURATION
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = BertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def __magic_name__ ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def __magic_name__ ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def __magic_name__ ( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
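# Illustration (standalone, mirroring the two methods above): a sentence pair
# is packed as [CLS] A [SEP] B [SEP], with token type id 0 covering the first
# sentence plus its CLS/SEP and 1 covering the second. The ids are made up:
demo_cls, demo_sep = 101, 102
demo_ids_a, demo_ids_b = [7, 8], [9]
demo_inputs = [demo_cls] + demo_ids_a + [demo_sep] + demo_ids_b + [demo_sep]
demo_type_ids = [0] * (len(demo_ids_a) + 2) + [1] * (len(demo_ids_b) + 1)
assert demo_inputs == [101, 7, 8, 102, 9, 102]
assert demo_type_ids == [0, 0, 0, 0, 1, 1]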
| 665 | 0 |