"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def UpperCamelCase__ ( ):
snake_case : List[str] = ArgumentParser(
description=(
"PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores" , type=lowercase__ , default=1 , help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script" , type=lowercase__ , help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
) , )
# rest from the training program
parser.add_argument("training_script_args" , nargs=lowercase__ )
return parser.parse_args()
def UpperCamelCase__ ( ):
snake_case : Optional[Any] = parse_args()
# Import training_script as a module.
snake_case : Optional[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
snake_case : int = script_fpath.stem
snake_case : List[Any] = importlib.import_module(lowercase__ )
# Patch sys.argv
snake_case : str = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
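# Usage sketch for the launcher above (the script filename is hypothetical; the
# flags come from the parser defined here):
#
#   python xla_spawn.py --num_cores 8 trainer.py --learning_rate 5e-5
#
# Everything after the training-script path is forwarded to it via sys.argv,
# with --tpu_num_cores appended by the launcher.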
import collections
import os
from typing import List, Optional, Tuple

from transformers.utils import is_jieba_available, requires_backends


if is_jieba_available():
    import jieba

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Load a vocabulary file into an ordered token -> index mapping."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Greedy longest-match-first tokenization of a single token."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Find the longest substring starting at `start` that is in the vocab.
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                # No match at all: emit the unknown token for this character.
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
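# A minimal illustration of the greedy longest-match-first loop above (toy
# vocabulary and inputs, purely hypothetical):
#
#   vocab = {"un": 0, "aff": 1, "able": 2, "unaff": 3}
#   WordpieceTokenizer(vocab).tokenize("unaffable")  # -> ["unaff", "able"]
#   WordpieceTokenizer(vocab).tokenize("qqq")        # -> ["<unk>", "<unk>", "<unk>"]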
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # Map the literal space and newline characters onto the dedicated tokens.
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # Restore the dedicated space/newline tokens before writing to disk.
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
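# Shape of the special-token handling above, with toy ids (for illustration
# only; `bos` stands for self.bos_token_id):
#
#   build_inputs_with_special_tokens([5, 6])       -> [bos, 5, 6]
#   build_inputs_with_special_tokens([5, 6], [7])  -> [bos, 5, 6, bos, 7]
#   get_special_tokens_mask([5, 6], [7])           -> [1, 0, 0, 1, 0]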
"""simple docstring"""
def lowerCAmelCase__ ( _UpperCamelCase : int ) -> list[int]:
"""simple docstring"""
if length <= 0 or not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(_UpperCamelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
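# Expected output of the two calls above (the sequence starts at n = 0, so the
# first value is 0):
#   [0, 1, 6, 15, 28]
#   [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]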
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=True,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args
    ):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_forward_and_backwards(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args
    ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
@slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
@slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_biogpt(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]
        vocab_size = 42384
        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
_DESCRIPTION = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
_KWARGS_DESCRIPTION = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
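# Minimal usage sketch, mirroring the example in the kwargs docstring above:
#
#   rouge = datasets.load_metric("rouge")
#   results = rouge.compute(predictions=["hello there"], references=["hello there"])
#   results["rouge1"].mid.fmeasure  # -> 1.0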
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A: Optional[int] = logging.get_logger(__name__)
A: Optional[int] = torch.device("cpu")
def _snake_case ( ):
UpperCAmelCase : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase : Tuple = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw )
return im
def _snake_case ( UpperCamelCase : int ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0, 8.8_6_8_5e-0_1, 2.4_3_6_0e-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6e-0_1, 2.3_4_7_8e-0_1, -1.6_9_6_3e0_0, -1.7_3_8_1e0_0, -8.6_3_3_7e-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8e-0_1, -4.7_4_2_9e-0_1, -1.0_8_9_7e0_0, -1.0_2_4_8e0_0, 3.5_5_2_3e-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0e-0_1, 2.4_2_1_1e-0_1, -6.0_1_8_5e-0_1, -8.2_7_8_9e-0_1, -6.0_4_4_6e-0_2] )
def _snake_case ( UpperCamelCase : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : str ):
UpperCAmelCase : int = dct.pop(UpperCamelCase )
UpperCAmelCase : Any = val
def _snake_case ( UpperCamelCase : Union[str, Any] ):
UpperCAmelCase : Optional[int] = []
for k in state_dict.keys():
UpperCAmelCase : Optional[Any] = k
if ".pwconv" in k:
UpperCAmelCase : int = k_new.replace(""".pwconv""" , """.point_wise_conv""" )
if ".dwconv" in k:
UpperCAmelCase : Tuple = k_new.replace(""".dwconv""" , """.depth_wise_conv""" )
if ".Proj." in k:
UpperCAmelCase : List[Any] = k_new.replace(""".Proj.""" , """.proj.""" )
if "patch_embed" in k_new:
UpperCAmelCase : Any = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" )
if "network" in k_new:
UpperCAmelCase : int = k_new.split(""".""" )
if ls[2].isdigit():
UpperCAmelCase : List[Any] = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
else:
UpperCAmelCase : Any = k_new.replace("""network""" , """swiftformer.encoder.network""" )
rename_keys.append((k, k_new) )
return rename_keys
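# Illustrative effect of the renaming rules above on a hypothetical checkpoint key:
#   "network.0.2.attn.Proj.weight"
#       -> "swiftformer.encoder.network.0.blocks.2.attn.proj.weight"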
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
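# Usage sketch (the script filename and checkpoint path are hypothetical; the
# flags come from the parser above):
#
#   python convert_swiftformer.py --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ --original_ckpt ./ckpt.pth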
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Prune branches that have overshot the target or can no longer reach it.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
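# For nums = [3, 34, 4, 12, 5, 2] and max_sum = 9 the script prints the two
# subsets that reach the target, in depth-first order:
#   [3, 4, 2] [4, 5]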
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Compute pi to `precision` digits using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
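# The Chudnovsky series gains roughly 14 correct digits per term, which is why
# num_iterations is ceil(precision / 14) above. For reference, pi begins
# 3.14159265358979323846...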
"""Convert a trax Reformer checkpoint to PyTorch."""
import argparse
import pickle

import numpy as np
import torch
from torch import nn

from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging


logging.set_verbosity_info()


def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weights
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise the PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the trax model pickle file."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
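# Usage sketch (file paths are hypothetical; the flags come from the parser above):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./model.pkl \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin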
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
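# PipelineDataset applies the pipeline's preprocessing lazily, one item at a
# time, so a DataLoader can batch already-preprocessed examples. A usage sketch
# (the wrapped objects are hypothetical):
#
#   dataset = PipelineDataset(raw_dataset, pipe.preprocess, {})
#   loader = torch.utils.data.DataLoader(dataset, batch_size=8)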
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # Subiterator None means we haven't started a `preprocess` iterator, so start it.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
"""simple docstring"""
from typing import Any
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = data
lowerCAmelCase : Tuple = None
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = None
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.head
while temp is not None:
print(temp.data , end=" " )
lowerCAmelCase : List[str] = temp.next
print()
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : int = Node(snake_case__ )
lowerCAmelCase : Union[str, Any] = self.head
lowerCAmelCase : Optional[Any] = new_node
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if node_data_a == node_data_a:
return
else:
lowerCAmelCase : str = self.head
while node_a is not None and node_a.data != node_data_a:
lowerCAmelCase : Union[str, Any] = node_a.next
lowerCAmelCase : Any = self.head
while node_a is not None and node_a.data != node_data_a:
lowerCAmelCase : List[Any] = node_a.next
if node_a is None or node_a is None:
return
lowerCAmelCase , lowerCAmelCase : str = node_a.data, node_a.data
if __name__ == "__main__":
lowerCAmelCase__ = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
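# Expected output of the demo above (pushing 5..1 builds the list 1 2 3 4 5;
# swap_nodes(1, 4) exchanges the two payloads):
#   1 2 3 4 5
#   After swapping
#   4 2 3 1 5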
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] =KandinskyVaaImgaImgPipeline
a : Optional[int] =["image_embeds", "negative_image_embeds", "image"]
a : Optional[int] =[
"image_embeds",
"negative_image_embeds",
"image",
]
a : str =[
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
a : Dict =False
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase__ ( self ):
"""simple docstring"""
return 100
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase : List[str] = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
lowerCAmelCase : int = UNetaDConditionModel(**snake_case__ )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1_000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 133 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 36 |
"""simple docstring"""
def neville_interpolate(x_points: list, y_points: list, x0: float) -> list:
    """
    Interpolate and evaluate a polynomial at x0 using Neville's method.
    Returns the interpolated value and the full table of intermediate values.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
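    # Hedged, hand-checkable example: sampling f(x) = x**2 at x = 1..4 and
    # interpolating at x0 = 2.5 should reproduce 6.25 exactly, since the
    # samples lie on a quadratic.
    interpolated_value, _table = neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 2.5)
    print(interpolated_value)  # 6.25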
| 136 | 0 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 364 |
def nand_gate(input_a: int, input_b: int) -> int:
    """NAND gate: output is 0 only when both inputs are 1."""
    return int((input_a, input_b).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
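    # Hedged illustration: any gate can be built from NAND alone; for
    # instance, AND is a NAND followed by a second NAND used as an inverter.
    def and_from_nand(a: int, b: int) -> int:
        intermediate = nand_gate(a, b)
        return nand_gate(intermediate, intermediate)

    assert and_from_nand(1, 1) == 1 and and_from_nand(1, 0) == 0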
| 200 | 0 |
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specification for ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()
@contextmanager
def hide():
    """Context manager to hide the terminal cursor."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
| 109 |
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes strictly below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Count hybrid-integers p^q * q^p <= base^degree (Project Euler 800)."""
    upper_bound = degree * log2(base)
    upper_bound = int(upper_bound)
    prime_numbers = calculate_prime_numbers(upper_bound)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
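    # Hedged sanity check for the sieve helper (hand-verifiable):
    assert calculate_prime_numbers(20) == [2, 3, 5, 7, 11, 13, 17, 19]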
| 180 | 0 |
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)
if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
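    # Hedged, hand-checkable example: rotating [[1, 2], [3, 4]] by 90 degrees
    # counterclockwise moves the right column to the top row.
    assert rotate_90([[1, 2], [3, 4]]) == [[2, 4], [1, 3]]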
| 369 |
def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    """Compute the rank of a matrix via Gaussian elimination."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]

                # Reduce the row pointer by one to stay on the same row
                row -= 1

    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
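    # Hedged, hand-checkable example: this 3x2 matrix has two linearly
    # independent rows, so its rank is 2.
    assert rank_of_matrix([[1.0, 2.0], [2.0, 4.0], [3.0, 5.0]]) == 2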
| 165 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # The main process pads with one extra element so the shapes differ.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
| 258 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 258 | 1 |
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split the gen_kwargs into `max_num_jobs` gen_kwargs."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a shuffled copy of the input gen_kwargs."""
    # Lists of the same size must get the same shuffling, so that entangled
    # data sources stay aligned; shuffle indices once per list size.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
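# Hedged usage sketch of the sharding helpers above, with made-up file names:
if __name__ == "__main__":
    gen_kwargs = {"files": ["a.txt", "b.txt", "c.txt", "d.txt"], "split": "train"}
    # Lists are treated as shardable data sources; other values are copied as-is.
    assert _split_gen_kwargs(gen_kwargs, max_num_jobs=2) == [
        {"files": ["a.txt", "b.txt"], "split": "train"},
        {"files": ["c.txt", "d.txt"], "split": "train"},
    ]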
| 371 |
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
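    # Worked example (hedged arithmetic): 25_000 principal at 12% p.a. over
    # 3 years means a 0.01 monthly rate across 36 payments, so the EMI comes
    # out to roughly 830.36.
    print(round(equated_monthly_installments(25_000, 0.12, 3), 2))  # ~830.36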
| 51 | 0 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
def a__ ( self ):
__a = XLMRobertaTokenizer(lowerCamelCase , keep_accents=lowerCamelCase )
__a = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
__a = tokenizer.convert_tokens_to_ids(lowerCamelCase )
self.assertListEqual(
lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
__a = tokenizer.convert_ids_to_tokens(lowerCamelCase )
self.assertListEqual(
lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def a__ ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__a = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__a = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
__a = self.tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
__a = tempfile.mkdtemp()
__a = tokenizer_r.save_pretrained(lowerCamelCase )
__a = tokenizer_p.save_pretrained(lowerCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
__a = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(lowerCamelCase , lowerCamelCase )
# Checks everything loads correctly in the same way
__a = tokenizer_r.from_pretrained(lowerCamelCase )
__a = tokenizer_p.from_pretrained(lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase , lowerCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase )
# Save tokenizer rust, legacy_format=True
__a = tempfile.mkdtemp()
__a = tokenizer_r.save_pretrained(lowerCamelCase , legacy_format=lowerCamelCase )
__a = tokenizer_p.save_pretrained(lowerCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase , lowerCamelCase )
# Checks everything loads correctly in the same way
__a = tokenizer_r.from_pretrained(lowerCamelCase )
__a = tokenizer_p.from_pretrained(lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase , lowerCamelCase ) )
shutil.rmtree(lowerCamelCase )
# Save tokenizer rust, legacy_format=False
__a = tempfile.mkdtemp()
__a = tokenizer_r.save_pretrained(lowerCamelCase , legacy_format=lowerCamelCase )
__a = tokenizer_p.save_pretrained(lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__a = tokenizer_r.from_pretrained(lowerCamelCase )
__a = tokenizer_p.from_pretrained(lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase , lowerCamelCase ) )
shutil.rmtree(lowerCamelCase )
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def a__ ( self ):
__a = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
__a = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) )
@slow
def a__ ( self ):
# fmt: off
__a = {"input_ids": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase , model_name="xlm-roberta-base" , revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3" , )
| 261 | """simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
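# Hedged micro-example of the property the tests pin down: the custom HashMap
# should be a drop-in replacement for dict on these operations.
if __name__ == "__main__":
    hm = HashMap(initial_block_size=4)
    hm["key_a"] = "val_a"
    assert hm["key_a"] == "val_a"
    del hm["key_a"]
    assert len(hm) == 0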
| 261 | 1 |
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test for Mersenne numbers 2**p - 1."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1))
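    # Hedged check of the two calls above: 2**7 - 1 = 127 is a Mersenne
    # prime, while 2**11 - 1 = 2047 = 23 * 89 is not.
    assert lucas_lehmer_test(7) is True
    assert lucas_lehmer_test(11) is False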
| 193 |
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
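# Hedged illustration of the rendered output: an input file containing
#   {"benchmarks/time.json": {"load": {"new": 1.5, "old": 2.0, "diff": -0.5}}}
# produces a "### Benchmark: time.json" heading followed by a one-metric
# table whose value cell reads roughly "1.500000 / 2.000000 (-0.500000)".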
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 193 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 198 |
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=0.2 , lowercase=0.2 ):
A_ : str = bp_numa
A_ : Optional[int] = bp_numa
A_ : Optional[Any] = bp_numa
A_ : str = conva_get[:2]
A_ : Union[str, Any] = conva_get[2]
A_ : Union[str, Any] = size_pa
A_ : List[str] = rate_w
A_ : Dict = rate_t
A_ : Dict = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
A_ : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
A_ : Tuple = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
A_ : List[Any] = -2 * np.random.rand(self.conva[1] ) + 1
A_ : Dict = -2 * np.random.rand(self.num_bpa ) + 1
A_ : Any = -2 * np.random.rand(self.num_bpa ) + 1
def _a (self , lowercase ):
# save model dict with pickle
A_ : Union[str, Any] = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(lowercase , """wb""" ) as f:
pickle.dump(lowercase , lowercase )
print(F'Model saved: {save_path}' )
@classmethod
def _a (cls , lowercase ):
# read saved model
with open(lowercase , """rb""" ) as f:
A_ : Optional[int] = pickle.load(lowercase ) # noqa: S301
A_ : Optional[int] = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
A_ : Tuple = model_dic.get("""size_pooling1""" )
A_ : Optional[Any] = model_dic.get("""num_bp1""" )
A_ : List[str] = model_dic.get("""num_bp2""" )
A_ : Dict = model_dic.get("""num_bp3""" )
A_ : Tuple = model_dic.get("""rate_weight""" )
A_ : List[Any] = model_dic.get("""rate_thre""" )
# create model instance
A_ : List[str] = CNN(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
# modify model parameter
A_ : int = model_dic.get("""w_conv1""" )
A_ : str = model_dic.get("""wkj""" )
A_ : str = model_dic.get("""vji""" )
A_ : int = model_dic.get("""thre_conv1""" )
A_ : Union[str, Any] = model_dic.get("""thre_bp2""" )
A_ : List[Any] = model_dic.get("""thre_bp3""" )
return conv_ins
def _a (self , lowercase ):
return 1 / (1 + np.exp(-1 * x ))
def _a (self , lowercase ):
return round(lowercase , 3 )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase ):
# convolution process
A_ : Dict = convs[0]
A_ : Any = convs[1]
A_ : Tuple = np.shape(lowercase )[0]
# get the data slice of original image data, data_focus
A_ : List[str] = []
for i_focus in range(0 , size_data - size_conv + 1 , lowercase ):
for j_focus in range(0 , size_data - size_conv + 1 , lowercase ):
A_ : List[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowercase )
# calculate the feature map of every single kernel, and saved as list of matrix
A_ : int = []
A_ : List[str] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(lowercase ):
A_ : List[Any] = []
for i_focus in range(len(lowercase ) ):
A_ : List[str] = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowercase ) )
A_ : Tuple = np.asmatrix(lowercase ).reshape(
lowercase , lowercase )
data_featuremap.append(lowercase )
# expanding the data slice to One dimenssion
A_ : Optional[int] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowercase ) )
A_ : Dict = np.asarray(lowercase )
return focus_list, data_featuremap
def _a (self , lowercase , lowercase , lowercase="average_pool" ):
# pooling process
A_ : Union[str, Any] = len(featuremaps[0] )
A_ : str = int(size_map / size_pooling )
A_ : List[str] = []
for i_map in range(len(lowercase ) ):
A_ : Any = featuremaps[i_map]
A_ : Any = []
for i_focus in range(0 , lowercase , lowercase ):
for j_focus in range(0 , lowercase , lowercase ):
A_ : Tuple = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowercase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowercase ) )
A_ : List[str] = np.asmatrix(lowercase ).reshape(lowercase , lowercase )
featuremap_pooled.append(lowercase )
return featuremap_pooled
def _a (self , lowercase ):
# expanding three dimension data to one dimension list
A_ : List[Any] = []
for i in range(len(lowercase ) ):
A_ : Tuple = np.shape(data[i] )
A_ : str = data[i].reshape(1 , shapes[0] * shapes[1] )
A_ : Tuple = data_listed.getA().tolist()[0]
data_expanded.extend(lowercase )
A_ : Optional[Any] = np.asarray(lowercase )
return data_expanded
def _a (self , lowercase ):
# expanding matrix to one dimension list
A_ : str = np.asarray(lowercase )
A_ : Any = np.shape(lowercase )
A_ : int = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : List[str] = []
A_ : Union[str, Any] = 0
for i_map in range(lowercase ):
A_ : Union[str, Any] = np.ones((size_map, size_map) )
for i in range(0 , lowercase , lowercase ):
for j in range(0 , lowercase , lowercase ):
A_ : str = pd_pool[
i_pool
]
A_ : Optional[Any] = i_pool + 1
A_ : Union[str, Any] = np.multiply(
lowercase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(lowercase )
return pd_all
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=bool ):
# model traning
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(lowercase )) )
print((""" - - Shape: Teach_Data """, np.shape(lowercase )) )
A_ : Optional[Any] = 0
A_ : Dict = []
A_ : List[Any] = 10000
while rp < n_repeat and mse >= error_accuracy:
A_ : List[Any] = 0
print(F'-------------Learning Time {rp}--------------' )
for p in range(len(lowercase ) ):
# print('------------Learning Image: %d--------------'%p)
A_ : Optional[Any] = np.asmatrix(datas_train[p] )
A_ : str = np.asarray(datas_teach[p] )
A_, A_ : Dict = self.convolute(
lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
A_ : Any = self.pooling(lowercase , self.size_poolinga )
A_ : int = np.shape(lowercase )
A_ : Union[str, Any] = self._expand(lowercase )
A_ : Dict = data_bp_input
A_ : int = np.dot(lowercase , self.vji.T ) - self.thre_bpa
A_ : Any = self.sig(lowercase )
A_ : Optional[int] = np.dot(lowercase , self.wkj.T ) - self.thre_bpa
A_ : List[str] = self.sig(lowercase )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
A_ : Optional[Any] = np.multiply(
(data_teach - bp_outa) , np.multiply(lowercase , (1 - bp_outa) ) )
A_ : Tuple = np.multiply(
np.dot(lowercase , self.wkj ) , np.multiply(lowercase , (1 - bp_outa) ) )
A_ : Union[str, Any] = np.dot(lowercase , self.vji )
A_ : int = pd_i_all / (self.size_poolinga * self.size_poolinga)
A_ : str = pd_conva_pooled.T.getA().tolist()
A_ : List[Any] = self._calculate_gradient_from_pool(
lowercase , lowercase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
A_ : int = self._expand_mat(pd_conva_all[k_conv] )
A_ : Any = self.rate_weight * np.dot(lowercase , lowercase )
A_ : Union[str, Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
A_ : Tuple = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
A_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
A_ : List[str] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
A_ : List[Any] = self.thre_bpa - pd_k_all * self.rate_thre
A_ : Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
A_ : Optional[int] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
A_ : List[Any] = rp + 1
A_ : Union[str, Any] = error_count / patterns
all_mse.append(lowercase )
def draw_error():
A_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowercase , """+-""" )
plt.plot(lowercase , """r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(lowercase , alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, F' - - Mse: {mse:.6f}') )
if draw_e:
draw_error()
return mse
def _a (self , lowercase ):
# model predict
A_ : Tuple = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(lowercase )) )
for p in range(len(lowercase ) ):
A_ : int = np.asmatrix(datas_test[p] )
A_, A_ : str = self.convolute(
lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
A_ : Dict = self.pooling(lowercase , self.size_poolinga )
A_ : Tuple = self._expand(lowercase )
A_ : int = data_bp_input
A_ : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa
A_ : List[str] = self.sig(lowercase )
A_ : Optional[Any] = bp_outa * self.wkj.T - self.thre_bpa
A_ : List[Any] = self.sig(lowercase )
produce_out.extend(bp_outa.getA().tolist() )
A_ : Any = [list(map(self.do_round , lowercase ) ) for each in produce_out]
return np.asarray(lowercase )
def _a (self , lowercase ):
# return the data of image after convoluting process so we can check it out
A_ : Optional[Any] = np.asmatrix(lowercase )
A_, A_ : Optional[Any] = self.convolute(
lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
A_ : Union[str, Any] = self.pooling(lowercase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
    pass
| 206 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forward everything to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forward everything to the tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
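# Hedged usage sketch (the checkpoint name below is an assumption, not taken
# from this file; verify it before relying on it):
#
#   from transformers import CLIPSegProcessor
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a cat"], images=image, return_tensors="pt")
#   # or, with a visual prompt instead of text:
#   inputs = processor(images=image, visual_prompt=prompt_image, return_tensors="pt")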
| 45 |
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times
    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
def A_ ( self : Optional[int] , UpperCAmelCase : deque[Process] ) -> deque[Process]:
lowerCamelCase__ : deque[Process] = deque() # sequence deque of finished process
while len(UpperCAmelCase ) != 0:
lowerCamelCase__ : List[Any] = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(UpperCAmelCase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
lowerCamelCase__ : Optional[int] = 0
# set the process's turnaround time because it is finished
lowerCamelCase__ : Union[str, Any] = self.current_time - cp.arrival_time
# set the completion time
lowerCamelCase__ : Any = self.current_time
# add the process to queue that has finished queue
finished.append(UpperCAmelCase )
self.finish_queue.extend(UpperCAmelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def A_ ( self : str , UpperCAmelCase : deque[Process] , UpperCAmelCase : int ) -> tuple[deque[Process], deque[Process]]:
lowerCamelCase__ : deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(UpperCAmelCase ) ):
lowerCamelCase__ : Dict = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(UpperCAmelCase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
lowerCamelCase__ : List[str] = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(UpperCAmelCase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
lowerCamelCase__ : Any = 0
# set the finish time
lowerCamelCase__ : int = self.current_time
# update the process' turnaround time because it is finished
lowerCamelCase__ : Dict = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(UpperCAmelCase )
self.finish_queue.extend(UpperCAmelCase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def A_ ( self : Dict ) -> deque[Process]:
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
lowerCamelCase__ , lowerCamelCase__ : Any = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
    # print total waiting times of processes (P1, P2, P3, P4)
    print(
        f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print completion times of processes (P1, P2, P3, P4)
    print(
        f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print total turnaround times of processes (P1, P2, P3, P4)
    print(
        f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print the sequence of finished processes
    print(
        f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
    )
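    # Hand-simulated expected output for the demo above (all processes arrive at t=0,
    # queues use time slices 17 and 25, then FCFS) -- a sanity check, not a doctest:
    #   waiting time:            [83, 17, 94, 101]
    #   completion time:         [136, 34, 162, 125]
    #   turnaround time:         [136, 34, 162, 125]
    #   sequence of finished processes: ['P2', 'P4', 'P1', 'P3']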
| 45 | 1 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/esm2_t6_8M_UR50D': 1_024,
'facebook/esm2_t12_35M_UR50D': 1_024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
        return [line.strip() for line in lines]
class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)
    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)
    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))
    def _tokenize(self, text, **kwargs):
        return text.split()
    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)
    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}
    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))
    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token
    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask
    def save_vocabulary(self, save_directory, filename_prefix=None):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)
    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)
    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False):
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
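# Illustrative usage (a sketch, assuming a local "vocab.txt" with one token per line;
# the file name and input string are hypothetical):
#   tokenizer = EsmTokenizer(vocab_file="vocab.txt")
#   tokenizer.tokenize("M K T A")  # -> ["M", "K", "T", "A"]: plain whitespace split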
| 144 |
"""simple docstring"""
def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # left-pad with zeros until the length is a multiple of 3
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        # each group of three bits maps to one octal digit
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
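    # Illustrative sanity checks (assuming the reconstructed function above):
    assert bin_to_octal("1111") == "17"  # 0b1111 == 0o17 == 15
    assert bin_to_octal("101010101") == "525"  # groups: 101 | 010 | 101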
| 144 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"
    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 363 |
INSTALL_CONTENT = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 180 | 0 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()
        input_sentences = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        batch_ids = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
        computed_ids = tokenizer.batch_encode_plus(input_sentences)["input_ids"]
        self.assertListEqual(batch_ids, computed_ids)
        decoded_sentences = tokenizer.batch_decode(computed_ids)
        self.assertListEqual(input_sentences, decoded_sentences)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)
                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )
    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)
        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())
        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)
    def test_pretrained_model_lists(self):
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 217 |
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None
    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
| 217 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 370 |
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
_DESCRIPTION = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
_KWARGS_DESCRIPTION = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,homepage='https://unbabel.github.io/COMET/html/index.html',inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
'sources': datasets.Value('string',id='sequence' ),
'predictions': datasets.Value('string',id='sequence' ),
'references': datasets.Value('string',id='sequence' ),
} ),codebase_urls=['https://github.com/Unbabel/COMET'],reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6',
],)
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da'))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))
    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'src': sources, 'mt': predictions, 'ref': references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 282 | 0 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0)
    else:
        raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}')
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
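# In short: this discretizes a continuous alpha-bar noise schedule. For each step i,
# beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta, so the running
# product of (1 - beta_i) tracks the chosen cosine (or exponential) curve.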
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_train_timesteps=1000, variance_type="fixed_small_log", clip_sample=True, clip_sample_range=1.0, prediction_type="epsilon", beta_schedule="squaredcos_cap_v2"):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")
        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())
        self.variance_type = variance_type
    def scale_model_input(self, sample, timestep=None) -> torch.FloatTensor:
        return sample
    def set_timesteps(self, num_inference_steps, device=None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(self, model_output, timestep, sample, prev_timestep=None, generator=None, return_dict=True) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'
                " for the UnCLIPScheduler."
            )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )
            variance = self._get_variance(
                t, predicted_variance=predicted_variance, prev_timestep=prev_timestep
            )
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f'variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'
                    " for the UnCLIPScheduler."
                )
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(self, original_samples, noise, timesteps) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
| 254 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 254 | 1 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
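# The `beta` line above is the ordinary least-squares normal equation,
# beta = (X^T X)^{-1} X^T y, fitted on feature rows [1, date, match_count].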
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
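# SARIMAX order=(p, d, q)=(1, 2, 1) sets the non-seasonal AR/differencing/MA terms,
# and seasonal_order=(1, 1, 0, 7) adds a weekly (period-7) seasonal component.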
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
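# Illustrative: interquartile_range_checker([2, 4, 6, 8, 10]) -> 3.6
# (q1 = 4, q3 = 8, iqr = 4, so the lower limit is 4 - 0.4).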
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
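# Illustrative: data_safety_checker([0.5, 0.45, 0.56], 0.5) -> True,
# since two of the three forecast votes land within 0.1 of the actual value.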
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]
    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]
    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]
    # check the safety of today's data against the actual held-out value
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
| 193 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False
    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True
    def init_retrieval(self):
        self.retriever.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )
    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)
    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
| 193 | 1 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()
def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)
        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)
        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_int = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False), expected_map_nested_sn_int)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum, sn, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn.items()},
        )
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False, num_proc=num_proc), expected_map_nested_sn_int)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum, sn, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, s1, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)
    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"
        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
@require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers
        model = layers.Dense(2)
        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()
        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@require_torch
    def test_torch(self):
        import torch
        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()
        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)
        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize('''input_data''' , [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''' , [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output
    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output
    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()
def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 51 |
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
| 51 | 1 |
"""simple docstring"""
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that ``n`` is not already used in the row, column, or 3x3 box."""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
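# For example, with ``initial_grid`` above, is_safe(initial_grid, 0, 1, 5) is False:
# 5 already appears in row 0 (and in the top-left 3x3 box via row 1).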
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of an empty cell, or None if the grid is full."""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Fill the grid by backtracking; return the solved grid or None."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0  # undo the guess and backtrack
    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 2_0)
print_solution(example_grid)
print('''\nExample grid solution:''')
    solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 195 |
"""simple docstring"""
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of ``nums``."""
    if not nums:
        return 0
    max_including = nums[0]  # best sum that includes the current element
    max_excluding = 0  # best sum that excludes the current element
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
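# Illustrative: maximum_non_adjacent_sum([1, 2, 4, 7, 3]) == 9 (take 2 and 7).
# Each step either extends the best sum that excluded the previous element
# or carries forward the better of the two running totals.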
if __name__ == "__main__":
import doctest
doctest.testmod()
| 195 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
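# Illustrative: split_text("a b c d e", n=2) -> ["a b", "c d", "e"]
# (chunks of up to ``n`` whitespace-separated words).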
def lowercase ( lowerCAmelCase__ : dict ) -> dict:
__a , __a = [], []
for title, text in zip(documents['''title'''] , documents['''text'''] ):
if text is not None:
for passage in split_text(lowerCAmelCase__ ):
titles.append(title if title is not None else '''''' )
texts.append(lowerCAmelCase__ )
return {"title": titles, "text": texts}
def lowercase ( lowerCAmelCase__ : dict , lowerCAmelCase__ : DPRContextEncoder , lowerCAmelCase__ : DPRContextEncoderTokenizerFast ) -> dict:
__a = ctx_tokenizer(
documents['''title'''] , documents['''text'''] , truncation=lowerCAmelCase__ , padding='''longest''' , return_tensors='''pt''' )['''input_ids''']
__a = ctx_encoder(input_ids.to(device=lowerCAmelCase__ ) , return_dict=lowerCAmelCase__ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowercase ( lowerCAmelCase__ : "RagExampleArguments" , lowerCAmelCase__ : "ProcessingArguments" , lowerCAmelCase__ : "IndexHnswArguments" , ) -> Tuple:
######################################
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        '''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents , batched=True , num_proc=processing_args.num_proc )
# And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=device )
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
    new_features = Features(
        {'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed , ctx_encoder=ctx_encoder , ctx_tokenizer=ctx_tokenizer ) , batched=True , batch_size=processing_args.batch_size , features=new_features , )
# And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' )
    dataset.save_to_disk(passages_path )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index('''embeddings''' , custom_index=index )
# And save the index
    index_path = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' )
    dataset.get_index('''embeddings''' ).save(index_path )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
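# A hedged sketch (not part of the original script) of how the saved passages and
# index could be loaded back for retrieval; the variable names are illustrative:
#   from datasets import load_from_disk
#   ds = load_from_disk(passages_path)
#   ds.load_faiss_index("embeddings", index_path)
#   scores, examples = ds.get_nearest_examples("embeddings", question_embedding, k=5)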
@dataclass
class RagExampleArguments:
    '''simple docstring'''
    csv_path: str = field(
        default=str(Path(__file__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , )
    question: Optional[str] = field(
        default=None , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , )
    rag_model_name: str = field(
        default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , )
    dpr_ctx_encoder_model_name: str = field(
        default='facebook/dpr-ctx_encoder-multiset-base' , metadata={
            'help': (
                'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
                ' \'facebook/dpr-ctx_encoder-multiset-base\''
            )
        } , )
    output_dir: Optional[str] = field(
        default=str(Path(__file__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , )
@dataclass
class ProcessingArguments:
    '''simple docstring'''
    num_proc: Optional[int] = field(
        default=None , metadata={
            'help': 'The number of processes to use to split the documents into passages. Default is single process.'
        } , )
    batch_size: int = field(
        default=1_6 , metadata={
            'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
        } , )
@dataclass
class IndexHnswArguments:
    '''simple docstring'''
    d: int = field(
        default=7_6_8 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , )
    m: int = field(
        default=1_2_8 , metadata={
            'help': (
                'The number of bi-directional links created for every new element during the HNSW index construction.'
            )
        } , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowercase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 45 |
"""simple docstring"""
encode_dict = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
decode_dict = {value: key for key, value in encode_dict.items()}
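# Each letter maps to a unique five-symbol group of "A"/"B" (a binary-style code),
# so decoding walks each space-separated word five symbols at a time.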
def encode(word: str ) -> str:
    '''simple docstring'''
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces" )
    return encoded
def decode(coded: str ) -> str:
    '''simple docstring'''
    if set(coded ) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces" )
    decoded = ""
    for word in coded.split():
        while len(word ) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
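# Hand-checked round trip: encode("a b") == "AAAAA AAAAB" and
# decode("AAAAA AAAAB") == "a b".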
if __name__ == "__main__":
from doctest import testmod
testmod()
| 105 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __a ( unittest.TestCase ):
@property
    def dummy_uncond_unet( self ):
        torch.manual_seed(0 )
        model = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model
    def test_inference( self ):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet , scheduler=scheduler )
        sde_ve.to(torch_device )
        sde_ve.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=generator ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=generator , return_dict=False )[
            0
        ]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __a ( unittest.TestCase ):
    def test_inference( self ):
        model_id = 'google/ncsnpp-church-256'
        model = UNetaDModel.from_pretrained(model_id )
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id )
        sde_ve = ScoreSdeVePipeline(unet=model , scheduler=scheduler )
        sde_ve.to(torch_device )
        sde_ve.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = sde_ve(num_inference_steps=10 , output_type='numpy' , generator=generator ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 2_56, 2_56, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 80 |
"""simple docstring"""
from math import factorial
def solution(n: int = 20 ) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
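# The number of monotonic lattice paths on an n x n grid is the central binomial
# coefficient C(2n, n) = (2n)! / (n! * n!); the doubled n and k = n // 2 above
# compute exactly that.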
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
| 80 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ViltImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        """simple docstring"""
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        encoding.update(encoding_image_processor )
        return encoding
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 250 |
import argparse
import copy
def generate_neighbours(path ):
    '''simple docstring'''
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
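# The returned structure maps each node label to [neighbour, distance] pairs,
# e.g. {"a": [["b", "20"], ["c", "18"]], ...} (values here are illustrative).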
def generate_first_solution(path , dict_of_neighbours ):
    '''simple docstring'''
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10_000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 10_000
    )
    return first_solution, distance_of_first_solution
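# This builds a greedy nearest-neighbour tour: from the visiting node, always
# hop to the cheapest unvisited neighbour, then close the cycle at the start.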
def find_neighborhood(solution , dict_of_neighbours ):
    '''simple docstring'''
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n )
        for kn in solution[1:-1]:
            idx2 = solution.index(kn )
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution )
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
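# Each neighbour is the current tour with two interior cities swapped; the tour's
# total distance is appended as the last element so the list can be sorted by cost.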
def tabu_search(first_solution , distance_of_first_solution , dict_of_neighbours , iters , size ):
    '''simple docstring'''
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list ) >= size:
            tabu_list.pop(0 )
        count = count + 1
    return best_solution_ever, best_cost
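# The tabu list remembers recently swapped city pairs; a pair is forbidden (in
# either order) until it ages out of the fixed-size list, which lets the search
# leave a local optimum instead of immediately undoing the last move.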
def main(args=None ):
    '''simple docstring'''
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol, best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(F"Best solution: {best_sol}, with total distance: {best_cost}." )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 36 | 0 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class A__ ( unittest.TestCase ):
'''simple docstring'''
    def test_swish( self ) -> Any:
        """simple docstring"""
        act = get_activation("swish")
        self.assertIsInstance(act , nn.SiLU)
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa)).item() , 0)
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa)).item() , 20)
    def test_silu( self ) -> Optional[Any]:
        """simple docstring"""
        act = get_activation("silu")
        self.assertIsInstance(act , nn.SiLU)
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa)).item() , 0)
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa)).item() , 20)
    def test_mish( self ) -> Tuple:
        """simple docstring"""
        act = get_activation("mish")
        self.assertIsInstance(act , nn.Mish)
self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa)).item() , 0)
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa)).item() , 20)
    def test_gelu( self ) -> List[Any]:
        """simple docstring"""
        act = get_activation("gelu")
        self.assertIsInstance(act , nn.GELU)
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa)).item() , 0)
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa)).item() , 20) | 368 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__snake_case : Tuple = '0.12' # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape ,vocab_size ,rng=None ):
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims ):
        values.append(rng.randint(0 ,vocab_size - 1 ) )
    output = np.array(values ,dtype=jnp.intaa ).reshape(shape )
    return output
def random_attention_mask(shape ,rng=None ):
    attn_mask = ids_tensor(shape ,vocab_size=2 ,rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class A__ :
'''simple docstring'''
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = ()
def _SCREAMING_SNAKE_CASE ( self: int) -> Tuple:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
__lowerCAmelCase : Tuple = 2
__lowerCAmelCase : Dict = inputs["input_ids"].shape[-1] // 2
__lowerCAmelCase : Union[str, Any] = inputs["input_ids"][:max_batch_size, :sequence_length]
__lowerCAmelCase : str = jnp.ones_like(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
__lowerCAmelCase : Dict = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
__lowerCAmelCase : int = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE ( self: int) -> str:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : str = self._get_input_ids_and_config()
__lowerCAmelCase : Dict = False
__lowerCAmelCase : Dict = max_length
__lowerCAmelCase : Any = 0
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : int = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = pt_model_class(_SCREAMING_SNAKE_CASE).eval()
__lowerCAmelCase : Optional[int] = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , flax_model.params)
__lowerCAmelCase : int = flax_model.generate(_SCREAMING_SNAKE_CASE).sequences
__lowerCAmelCase : Any = pt_model.generate(torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.long))
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
__lowerCAmelCase : Any = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = self._get_input_ids_and_config()
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : Optional[Any] = max_length
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : List[str] = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = model.generate(_SCREAMING_SNAKE_CASE).sequences
self.assertEqual(generation_outputs.shape[-1] , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = jit(model.generate)
__lowerCAmelCase : List[str] = jit_generate(_SCREAMING_SNAKE_CASE).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self: Dict) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = self._get_input_ids_and_config()
__lowerCAmelCase : Dict = True
__lowerCAmelCase : List[str] = max_length
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = model.generate(_SCREAMING_SNAKE_CASE).sequences
self.assertEqual(generation_outputs.shape[-1] , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = jit(model.generate)
__lowerCAmelCase : Optional[Any] = jit_generate(_SCREAMING_SNAKE_CASE).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Dict:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = self._get_input_ids_and_config()
__lowerCAmelCase : Tuple = False
__lowerCAmelCase : Tuple = max_length
__lowerCAmelCase : Any = 2
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : Optional[int] = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = model.generate(_SCREAMING_SNAKE_CASE).sequences
self.assertEqual(generation_outputs.shape[-1] , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = jit(model.generate)
__lowerCAmelCase : Dict = jit_generate(_SCREAMING_SNAKE_CASE).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self: str) -> str:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[int] = self._get_input_ids_and_config()
__lowerCAmelCase : List[Any] = False
__lowerCAmelCase : Any = max_length
__lowerCAmelCase : Dict = 2
__lowerCAmelCase : int = 2
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = model.generate(_SCREAMING_SNAKE_CASE).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences)
def _SCREAMING_SNAKE_CASE ( self: Any) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = self._get_input_ids_and_config()
__lowerCAmelCase : str = True
__lowerCAmelCase : Tuple = max_length
__lowerCAmelCase : Tuple = 0.8
__lowerCAmelCase : Any = 10
__lowerCAmelCase : Any = 0.3
__lowerCAmelCase : List[Any] = 1
__lowerCAmelCase : int = 8
__lowerCAmelCase : Optional[int] = 9
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = model.generate(_SCREAMING_SNAKE_CASE).sequences
self.assertEqual(generation_outputs.shape[-1] , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = jit(model.generate)
__lowerCAmelCase : str = jit_generate(_SCREAMING_SNAKE_CASE).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[str] = self._get_input_ids_and_config()
__lowerCAmelCase : int = max_length
__lowerCAmelCase : Tuple = 1
__lowerCAmelCase : List[str] = 8
__lowerCAmelCase : str = 9
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : Dict = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = model.generate(_SCREAMING_SNAKE_CASE).sequences
self.assertEqual(generation_outputs.shape[-1] , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = jit(model.generate)
__lowerCAmelCase : str = jit_generate(_SCREAMING_SNAKE_CASE).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Dict:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = self._get_input_ids_and_config()
__lowerCAmelCase : Union[str, Any] = max_length
__lowerCAmelCase : Dict = 2
__lowerCAmelCase : Tuple = 1
__lowerCAmelCase : int = 8
__lowerCAmelCase : str = 9
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = model.generate(_SCREAMING_SNAKE_CASE).sequences
self.assertEqual(generation_outputs.shape[-1] , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = jit(model.generate)
__lowerCAmelCase : Union[str, Any] = jit_generate(_SCREAMING_SNAKE_CASE).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self: str) -> Any:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self._get_input_ids_and_config()
# pad attention mask on the left
__lowerCAmelCase : Tuple = attention_mask.at[(0, 0)].set(0)
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : int = max_length
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = model.generate(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE).sequences
self.assertEqual(generation_outputs.shape[-1] , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = jit(model.generate)
__lowerCAmelCase : Dict = jit_generate(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> str:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = self._get_input_ids_and_config()
# pad attention mask on the left
__lowerCAmelCase : int = attention_mask.at[(0, 0)].set(0)
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : Optional[Any] = max_length
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = model.generate(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE).sequences
self.assertEqual(generation_outputs.shape[-1] , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = jit(model.generate)
__lowerCAmelCase : Any = jit_generate(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> str:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self._get_input_ids_and_config()
# pad attention mask on the left
__lowerCAmelCase : int = attention_mask.at[(0, 0)].set(0)
__lowerCAmelCase : Tuple = 2
__lowerCAmelCase : Dict = max_length
for model_class in self.all_generative_model_classes:
__lowerCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = model.generate(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE).sequences
self.assertEqual(generation_outputs.shape[-1] , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = jit(model.generate)
__lowerCAmelCase : int = jit_generate(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
@require_flax
class A__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Any:
"""simple docstring"""
__lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
__lowerCAmelCase : Optional[int] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
__lowerCAmelCase : Optional[Any] = "Hello world"
__lowerCAmelCase : str = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors="np").input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , "do_samples"):
model.generate(_SCREAMING_SNAKE_CASE , do_samples=_SCREAMING_SNAKE_CASE)
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , "foo"):
__lowerCAmelCase : int = {"foo": "bar"}
model.generate(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE) | 58 | 0 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def test_mockfs(mockfs ):
    '''simple docstring'''
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs():
    '''simple docstring'''
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    '''simple docstring'''
    mock_bucket = "mock-s3-bucket"
    dataset_path = F's3://{mock_bucket}'
    dataset_path = extract_path_from_uri(dataset_path )
    assert dataset_path.startswith("s3://" ) is False
    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path )
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs ):
    '''simple docstring'''
    is_remote = is_remote_filesystem(mockfs )
    assert is_remote is True
    fs = fsspec.filesystem("file" )
    is_remote = is_remote_filesystem(fs )
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class" , _snake_case )
def lowerCAmelCase_ ( _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Tuple , _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Any ) -> int:
'''simple docstring'''
__magic_name__ : Any = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
__magic_name__ : str = input_paths[compression_fs_class.protocol]
if input_path is None:
__magic_name__ : Dict = F'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_snake_case )
__magic_name__ : str = fsspec.filesystem(compression_fs_class.protocol , fo=_snake_case )
assert isinstance(_snake_case , _snake_case )
__magic_name__ : int = os.path.basename(_snake_case )
__magic_name__ : Optional[int] = expected_filename[: expected_filename.rindex("." )]
assert fs.glob("*" ) == [expected_filename]
with fs.open(_snake_case , "r" , encoding="utf-8" ) as f, open(_snake_case , encoding="utf-8" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol" , ["zip", "gzip"] )
def lowerCAmelCase_ ( _snake_case : List[Any] , _snake_case : Optional[Any] , _snake_case : Optional[Any] ) -> str:
'''simple docstring'''
__magic_name__ : int = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
__magic_name__ : int = compressed_file_paths[protocol]
__magic_name__ : Tuple = "dataset.jsonl"
__magic_name__ : List[str] = F'''{protocol}://{member_file_path}::{compressed_file_path}'''
__magic_name__ , *__magic_name__ : Optional[Any] = fsspec.get_fs_token_paths(_snake_case )
assert fs.isfile(_snake_case )
assert not fs.isfile("non_existing_" + member_file_path )
@pytest.mark.integration
def test_hf_filesystem(hf_token , hf_api , hf_private_dataset_repo_txt_data , text_file ):
    '''simple docstring'''
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data , token=hf_token )
    hffs = HfFileSystem(repo_info=repo_info , token=hf_token )
    assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"]
    assert hffs.isdir("data" )
    assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" )
    with open(text_file ) as f:
        assert hffs.open("data/text_data.txt" , "r" ).read() == f.read()
def test_fs_overwrites():
    '''simple docstring'''
    protocol = "bz2"
    # Import module
    import datasets.filesystems
    # Overwrite protocol and reload
    register_implementation(protocol , None , clobber=True )
    with pytest.warns(UserWarning ) as warning_info:
        importlib.reload(datasets.filesystems )
    assert len(warning_info ) == 1
    assert (
        str(warning_info[0].message )
        == F'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
    )
| 281 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ):
            pass
def hashimage(image: Image ) -> str:
    '''simple docstring'''
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable(mask: Image ) -> Dict:
    '''simple docstring'''
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def get_test_pipeline( self , model , tokenizer , processor ):
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor )
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test( self , mask_generator , examples ):
        pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
def SCREAMING_SNAKE_CASE ( self ):
pass
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self ):
        image_segmenter = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_21},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.99_67},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_93},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.99_09},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.98_79},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.98_34},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.97_16},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.96_12},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.95_99},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.95_52},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.95_32},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.95_16},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.94_99},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.94_83},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.94_64},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.94_08},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.93_35},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.93_26},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.92_62},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.89_99},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.89_86},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.89_84},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.88_73},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.88_71}
] , )
# fmt: on
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : str = "facebook/sam-vit-huge"
__magic_name__ : str = pipeline("mask-generation" , model=_a )
__magic_name__ : Tuple = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
__magic_name__ : Any = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(_a ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.02_10},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
] , )
| 281 | 1 |
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeqaSeqTrainer( SeqaSeqTrainer ):
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs):
        super().__init__(*args , **kwargs)
        UpperCAmelCase__ = None
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
def snake_case__ ( self , _lowerCamelCase = None , _lowerCamelCase=None , _lowerCamelCase = None , _lowerCamelCase = "eval" , **_lowerCamelCase , ):
UpperCAmelCase__ : Optional[Any] = gen_kwargs.copy()
UpperCAmelCase__ : Any = (
gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""") is not None else self.args.generation_max_length
)
UpperCAmelCase__ : List[Any] = (
gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""") is not None else self.args.generation_num_beams
)
UpperCAmelCase__ : List[Any] = gen_kwargs
UpperCAmelCase__ : Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
UpperCAmelCase__ : Any = self.get_eval_dataloader(lowercase_)
UpperCAmelCase__ : List[str] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase__ : List[Any] = self.compute_metrics
UpperCAmelCase__ : Dict = None
UpperCAmelCase__ : Tuple = time.time()
UpperCAmelCase__ : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
UpperCAmelCase__ : int = eval_loop(
lowercase_ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
UpperCAmelCase__ : Dict = compute_metrics
UpperCAmelCase__ : Optional[Any] = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
UpperCAmelCase__ : List[str] = self.post_process_function(lowercase_ , lowercase_ , lowercase_)
UpperCAmelCase__ : Tuple = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f'''{metric_key_prefix}_'''):
UpperCAmelCase__ : List[str] = metrics.pop(lowercase_)
metrics.update(output.metrics)
else:
UpperCAmelCase__ : int = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase_)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
UpperCAmelCase__ : List[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_)
return metrics
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase = "test" , **_lowerCamelCase):
UpperCAmelCase__ : Tuple = gen_kwargs.copy()
UpperCAmelCase__ : Tuple = self.get_test_dataloader(lowercase_)
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase__ : Optional[Any] = self.compute_metrics
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : Tuple = time.time()
UpperCAmelCase__ : Dict = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
UpperCAmelCase__ : Dict = eval_loop(
lowercase_ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
UpperCAmelCase__ : List[str] = compute_metrics
UpperCAmelCase__ : int = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is None or self.compute_metrics is None:
return output
UpperCAmelCase__ : str = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , """predict""")
UpperCAmelCase__ : Optional[int] = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f'''{metric_key_prefix}_'''):
UpperCAmelCase__ : Any = metrics.pop(lowercase_)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_) | 364 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ["""GITHUB_TOKEN"""] )
    repo = g.get_repo("""huggingface/diffusers""" )
    open_issues = repo.get_issues(state="""open""" )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main() | 283 | 0 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest( TokenizerTesterMixin , unittest.TestCase):
"""simple docstring"""
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
pass
    def test_add_special_tokens( self ):
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token} )
                encoded_special_token = tokenizer.encode([special_token] , add_special_tokens=False )
                self.assertEqual(len(encoded_special_token ) , 1 )
                decoded = tokenizer.decode(encoded_special_token , skip_special_tokens=True )
                self.assertTrue(special_token not in decoded )
    def test_internal_consistency( self ):
        '''simple docstring'''
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                input_text, output_text = self.get_input_output_texts(tokenizer )
                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_a = tokenizer.encode(input_text , add_special_tokens=False )
                self.assertListEqual(ids , ids_a )
                tokens_a = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_a ) , 0 )
                text_a = tokenizer.decode(ids )
                self.assertIsInstance(text_a , str )
                self.assertEqual(text_a.replace(" " , "" ) , output_text )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def UpperCAmelCase_ ( self: List[str] ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
pass
| 149 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("""eta""", 0.0), ("""num_inference_steps""", 50))
    def get_scheduler_config( self , **kwargs ):
        '''simple docstring'''
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0_001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs )
        return config
    def full_loop( self , **config ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for t in scheduler.timesteps:
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample , eta ).prev_sample
        return sample
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowerCamelCase )
UpperCamelCase__: Tuple = self.scheduler_classes[0]
UpperCamelCase__: Optional[int] = self.get_scheduler_config(steps_offset=1 )
UpperCamelCase__: str = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase )
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCamelCase )
def UpperCAmelCase_ ( self: List[str] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCamelCase )
def UpperCAmelCase_ ( self: Any ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__lowerCamelCase )
def UpperCAmelCase_ ( self: List[str] ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__lowerCamelCase )
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
self.check_over_configs(thresholding=__lowerCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__lowerCamelCase , prediction_type=__lowerCamelCase , sample_max_value=__lowerCamelCase , )
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=__lowerCamelCase )
def UpperCAmelCase_ ( self: int ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
            self.check_over_forward(time_step=t , num_inference_steps=num_inference_steps )
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=t , eta=eta )
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
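        # Explanatory note (added for clarity, not original code): the expected
        # values below follow the DDIM posterior variance
        # sigma_t^2 = (1 - alpha_prod_prev) / (1 - alpha_prod) * (1 - alpha_prod / alpha_prod_prev),
        # evaluated at each (t, prev_t) pair passed to _get_variance.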
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14_771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32_460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps , eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps )
        model = self.dummy_model()
        samplea = self.dummy_sample_deter
        sampleb = self.dummy_sample_deter + 0.1
        samplec = self.dummy_sample_deter - 0.1
        per_sample_batch = samplea.shape[0]
        samples = torch.stack([samplea, sampleb, samplec] , dim=0 )
        timesteps = torch.arange(num_inference_steps )[0:3, None].repeat(1 , per_sample_batch )
        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , eta )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
assert abs(result_sum.item() - 1_147.7_904 ) < 1e-2
assert abs(result_mean.item() - 0.4_982 ) < 1e-3
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 172.0_067 ) < 1e-2
assert abs(result_mean.item() - 0.223_967 ) < 1e-3
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
        sample = self.full_loop(prediction_type="v_prediction" )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 52.5_302 ) < 1e-2
assert abs(result_mean.item() - 0.0_684 ) < 1e-3
def UpperCAmelCase_ ( self: List[str] ):
'''simple docstring'''
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 149.8_295 ) < 1e-2
assert abs(result_mean.item() - 0.1_951 ) < 1e-3
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 149.0_784 ) < 1e-2
assert abs(result_mean.item() - 0.1_941 ) < 1e-3
| 149 | 1 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _a ( unittest.TestCase ):
@property
def snake_case ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
        model = UNetaDModel(
block_out_channels=(3_2, 6_4), layers_per_block=2, sample_size=3_2, in_channels=3, out_channels=3, down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''), up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''), )
return model
def snake_case ( self : str ) -> Tuple:
'''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler )
        sde_ve.to(torch_device )
        sde_ve.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = sde_ve(num_inference_steps=2, output_type='''numpy''', generator=generator ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = sde_ve(num_inference_steps=2, output_type='''numpy''', generator=generator, return_dict=False )[
            0
        ]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 3_2, 3_2, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _a ( unittest.TestCase ):
def snake_case ( self : str ) -> Dict:
'''simple docstring'''
        model_id = '''google/ncsnpp-church-256'''
        model = UNetaDModel.from_pretrained(model_id )
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id )
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler )
        sde_ve.to(torch_device )
        sde_ve.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = sde_ve(num_inference_steps=1_0, output_type='''numpy''', generator=generator ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 2_5_6, 2_5_6, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
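# Minimal usage sketch (an illustration added for clarity, not part of the test
# file; the checkpoint id mirrors the slow test above):
#   sde_ve = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = sde_ve(num_inference_steps=10, output_type="numpy").images[0]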
| 370 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax( depth , node_index , is_max , scores , height ):
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if len(scores ) == 0:
        raise ValueError('''Scores cannot be empty''' )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
def main():
    scores = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    height = math.log(len(scores ) , 2 )
    print('''Optimal value : ''' , end='''''' )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
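    # Worked check (a sketch, not in the original file): with scores [3, 5, 2, 9]
    # the tree height is log2(4) = 2; the minimizer reduces the leaf pairs to
    # min(3, 5) = 3 and min(2, 9) = 2, so the maximizer's optimal value is 3.
    assert minimax(0 , 0 , True , [3, 5, 2, 9] , 2 ) == 3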
| 128 | 0 |
"""simple docstring"""
import math
def perfect_square( num ) -> bool:
    return math.sqrt(num ) * math.sqrt(num ) == num
def perfect_square_binary_search( n ) -> bool:
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
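# Exact-integer variant (a sketch, not part of the original module): math.isqrt
# avoids the float rounding that can make the sqrt-based check above misclassify
# perfect squares larger than 2**53.
def perfect_square_isqrt( num ) -> bool:
    return num >= 0 and math.isqrt(num ) ** 2 == num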
if __name__ == "__main__":
import doctest
doctest.testmod()
| 132 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
a :Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
a :Tuple = 50_003
a :Optional[int] = 50_002
@require_sentencepiece
@require_tokenizers
class __a (UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Dict = PLBartTokenizer
_SCREAMING_SNAKE_CASE :List[str] = None
_SCREAMING_SNAKE_CASE :Any = False
def _a ( self ) -> Dict:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ : Any = PLBartTokenizer(_a , language_codes="""base""" , keep_accents=_a )
tokenizer.save_pretrained(self.tmpdirname )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = PLBartTokenizer(_a , language_codes="""base""" , keep_accents=_a )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
        end = tokenizer.vocab_size
        tokens = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 4 , end )]
        self.assertListEqual(tokens , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
SCREAMING_SNAKE_CASE__ : List[str] = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
SCREAMING_SNAKE_CASE__ : Any = tokenizer(_a ).input_ids
self.assertEqual(
tokenizer.decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a ) , _a , )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = PLBartTokenizer(_a , language_codes="""multi""" , keep_accents=_a )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE__ : str = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE__ : str = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
        end = tokenizer.vocab_size
        tokens = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 7 , end )]
        self.assertListEqual(
            tokens , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
SCREAMING_SNAKE_CASE__ : Tuple = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer(_a ).input_ids
self.assertEqual(
tokenizer.decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a ) , _a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __a (unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[int] = """uclanlp/plbart-python-en_XX"""
_SCREAMING_SNAKE_CASE :List[Any] = [
"""def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])""",
"""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""",
]
_SCREAMING_SNAKE_CASE :Optional[Any] = [
"""Returns the maximum value of a b c.""",
"""Sums the values of a b c.""",
]
_SCREAMING_SNAKE_CASE :str = [
1_34,
54_52,
3_34_60,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
9_88,
20,
3_34_56,
19,
3_34_56,
7_71,
39,
42_58,
8_89,
33_18,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
24_71,
2,
PYTHON_CODE,
]
@classmethod
def _a ( cls ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
SCREAMING_SNAKE_CASE__ : Any = 1
return cls
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 50_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 50_002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 50_003 )
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _a )
def _a ( self ) -> Dict:
"""simple docstring"""
        self.assertIn(EN_CODE , self.tokenizer.all_special_ids )
        generated_ids = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_english = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_english )
        self.assertNotIn(self.tokenizer.eos_token , result )
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
self.assertIsInstance(src_text[0] , _a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 10
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer(_a , max_length=_a , truncation=_a ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , _a )
self.assertEqual(len(_a ) , _a )
def _a ( self ) -> Optional[int]:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [50_004, 50_001] )
def _a ( self ) -> Tuple:
"""simple docstring"""
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_a , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , _a )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_a , truncation=_a , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(_a , _a )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _a )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer(self.src_text , padding=_a , truncation=_a , max_length=3 , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer(
text_target=self.tgt_text , padding=_a , truncation=_a , max_length=10 , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ : List[Any] = targets["""input_ids"""]
SCREAMING_SNAKE_CASE__ : Dict = shift_tokens_right(_a , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" )
self.assertEqual(
nested_simplify(_a ) , {
# A, test, EOS, en_XX
"""input_ids""": [[150, 242, 2, 50_003]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 50_001,
} , )
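# Usage sketch (illustrative, not part of the test suite; `python_code` is a
# hypothetical variable holding a source string). The checkpoint and language
# codes mirror the integration tests above:
#   tokenizer = PLBartTokenizer.from_pretrained(
#       "uclanlp/plbart-python-en_XX", language_codes="base", src_lang="python", tgt_lang="en_XX")
#   batch = tokenizer(python_code, return_tensors="pt")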
| 132 | 1 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : int = {"vocab_file": "vocab.txt"}
__UpperCamelCase : Optional[int] = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
__UpperCamelCase : Optional[Any] = {
"facebook/esm2_t6_8M_UR50D": 1024,
"facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file( vocab_file ):
    """simple docstring"""
    with open(vocab_file , '''r''' ) as f:
        lines = f.read().splitlines()
return [l.strip() for l in lines]
class __magic_name__ ( __lowerCAmelCase):
A: int = VOCAB_FILES_NAMES
A: Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
A: Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A: str = ["input_ids", "attention_mask"]
    def __init__( self : List[str] , vocab_file : str , unk_token : str="<unk>" , cls_token : str="<cls>" , pad_token : str="<pad>" , mask_token : str="<mask>" , eos_token : str="<eos>" , **kwargs ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.all_tokens = load_vocab_file(vocab_file )
        self._id_to_token = dict(enumerate(self.all_tokens ) )
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )
def UpperCAmelCase__ ( self : List[Any] , lowerCamelCase__ : int ) -> str:
'''simple docstring'''
return self._id_to_token.get(lowerCamelCase__ , self.unk_token )
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : str ) -> int:
'''simple docstring'''
return self._token_to_id.get(lowerCamelCase__ , self._token_to_id.get(self.unk_token ) )
def UpperCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Any ) -> List[str]:
'''simple docstring'''
return text.split()
def UpperCAmelCase__ ( self : Any , lowerCamelCase__ : Optional[int]=False ) -> str:
'''simple docstring'''
return len(self._id_to_token )
def UpperCAmelCase__ ( self : str ) -> Any:
'''simple docstring'''
return {token: i for i, token in enumerate(self.all_tokens )}
def UpperCAmelCase__ ( self : Tuple , lowerCamelCase__ : str ) -> int:
'''simple docstring'''
return self._token_to_id.get(lowerCamelCase__ , self._token_to_id.get(self.unk_token ) )
def UpperCAmelCase__ ( self : List[str] , lowerCamelCase__ : int ) -> str:
'''simple docstring'''
return self._id_to_token.get(lowerCamelCase__ , self.unk_token )
def UpperCAmelCase__ ( self : List[Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
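        # Token layout (descriptive note, not in the original): a single sequence
        # becomes [<cls>] + ids + [<eos>]; a pair appends the second sequence plus
        # another <eos>, since the ESM vocabulary has no separate <sep> token.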
UpperCamelCase__ : Tuple = [self.cls_token_id]
UpperCamelCase__ : Tuple = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def UpperCAmelCase__ ( self : List[Any] , lowerCamelCase__ : List , lowerCamelCase__ : Optional[List] = None , lowerCamelCase__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
UpperCamelCase__ : Optional[int] = [1] + ([0] * len(lowerCamelCase__ )) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCamelCase__ ) + [1]
return mask
    def UpperCAmelCase__ ( self : Optional[int] , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple:
        '''simple docstring'''
        vocab_file = os.path.join(save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
        with open(vocab_file , '''w''' ) as f:
            f.write('''\n'''.join(self.all_tokens ) )
        return (vocab_file,)
@property
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
        return self.get_vocab_size(with_added_tokens=False )
    def UpperCAmelCase__ ( self : str , new_tokens : Union[List[str], List[AddedToken]] , special_tokens : bool = False ) -> int:
        '''simple docstring'''
        return super()._add_tokens(new_tokens , special_tokens=special_tokens )
| 51 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class __magic_name__ ( __lowerCAmelCase):
A: Optional[int] = "vit"
def __init__( self : Optional[int] , lowerCamelCase__ : Union[str, Any]=768 , lowerCamelCase__ : Optional[int]=12 , lowerCamelCase__ : Optional[int]=12 , lowerCamelCase__ : Any=3072 , lowerCamelCase__ : Tuple="gelu" , lowerCamelCase__ : Any=0.0 , lowerCamelCase__ : Tuple=0.0 , lowerCamelCase__ : Tuple=0.02 , lowerCamelCase__ : str=1E-1_2 , lowerCamelCase__ : Dict=224 , lowerCamelCase__ : int=16 , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : Optional[int]=16 , **lowerCamelCase__ : List[str] , ) -> List[Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
UpperCamelCase__ : Union[str, Any] = hidden_size
UpperCamelCase__ : str = num_hidden_layers
UpperCamelCase__ : List[str] = num_attention_heads
UpperCamelCase__ : List[str] = intermediate_size
UpperCamelCase__ : Optional[Any] = hidden_act
UpperCamelCase__ : Union[str, Any] = hidden_dropout_prob
UpperCamelCase__ : List[Any] = attention_probs_dropout_prob
UpperCamelCase__ : Union[str, Any] = initializer_range
UpperCamelCase__ : Union[str, Any] = layer_norm_eps
UpperCamelCase__ : Optional[int] = image_size
UpperCamelCase__ : Optional[Any] = patch_size
UpperCamelCase__ : Optional[Any] = num_channels
UpperCamelCase__ : List[Any] = qkv_bias
UpperCamelCase__ : Union[str, Any] = encoder_stride
class __magic_name__ ( __lowerCAmelCase):
A: int = version.parse("1.11")
@property
def UpperCAmelCase__ ( self : int ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def UpperCAmelCase__ ( self : Tuple ) -> float:
'''simple docstring'''
return 1E-4
| 51 | 1 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory( args: Namespace ):
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
_lowerCAmelCase = '''
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
@staticmethod
def a_ (_UpperCAmelCase ) -> Dict:
__UpperCamelCase : int = parser.add_parser(
"convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , )
train_parser.add_argument("--model_type" , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Model's type." )
train_parser.add_argument(
"--tf_checkpoint" , type=_UpperCAmelCase , required=_UpperCAmelCase , help="TensorFlow checkpoint path or folder." )
train_parser.add_argument(
"--pytorch_dump_output" , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Path to the PyTorch saved model output." )
train_parser.add_argument("--config" , type=_UpperCAmelCase , default="" , help="Configuration file path or folder." )
train_parser.add_argument(
"--finetuning_task_name" , type=_UpperCAmelCase , default=_UpperCAmelCase , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
        train_parser.set_defaults(func=convert_command_factory )
def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase , ) -> List[Any]:
__UpperCamelCase : Optional[int] = logging.get_logger("transformers-cli/converting" )
self._logger.info(f"Loading model {model_type}" )
__UpperCamelCase : Union[str, Any] = model_type
__UpperCamelCase : Optional[int] = tf_checkpoint
__UpperCamelCase : Any = pytorch_dump_output
__UpperCamelCase : List[Any] = config
__UpperCamelCase : Dict = finetuning_task_name
def a_ (self ) -> Optional[int]:
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(_UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_UpperCAmelCase )
if "ckpt" in self._tf_checkpoint.lower():
__UpperCamelCase : str = self._tf_checkpoint
__UpperCamelCase : Union[str, Any] = ""
else:
__UpperCamelCase : Dict = self._tf_checkpoint
__UpperCamelCase : Optional[Any] = ""
convert_transfo_xl_checkpoint_to_pytorch(
_UpperCAmelCase , self._config , self._pytorch_dump_output , _UpperCAmelCase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_UpperCAmelCase )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_UpperCAmelCase )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 298 |
'''simple docstring'''
def combination_sum_iv( n , array , target ):
    def count_of_possible_combinations(target ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def combination_sum_iv_dp_array( n , array , target ):
    def count_of_possible_combinations_with_dp_array(
        target , dp_array ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up( n , array , target ):
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
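    # Worked check (a sketch, not in the original file): with array [1, 2, 5] and
    # target 5 the bottom-up table fills as dp = [1, 1, 2, 3, 5, 9], i.e. nine
    # ordered combinations sum to 5.
    assert combination_sum_iv_bottom_up(n , array , target ) == 9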
| 298 | 1 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _snake_case ( snake_case ):
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Union[str, Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , "tf_padding" ) )
self.parent.assertTrue(hasattr(_a , "depth_multiplier" ) )
class _snake_case :
def __init__( self , _a , _a=13 , _a=3 , _a=32 , _a=0.25 , _a=8 , _a=True , _a=1_024 , _a=32 , _a="relu6" , _a=0.1 , _a=0.02 , _a=True , _a=True , _a=10 , _a=None , ):
__magic_name__ : Optional[int] = parent
__magic_name__ : Union[str, Any] = batch_size
__magic_name__ : Tuple = num_channels
__magic_name__ : Tuple = image_size
__magic_name__ : Any = depth_multiplier
__magic_name__ : Any = min_depth
__magic_name__ : Any = tf_padding
__magic_name__ : int = int(last_hidden_size * depth_multiplier )
__magic_name__ : Any = output_stride
__magic_name__ : Tuple = hidden_act
__magic_name__ : Optional[int] = classifier_dropout_prob
__magic_name__ : Any = use_labels
__magic_name__ : str = is_training
__magic_name__ : List[str] = num_labels
__magic_name__ : str = initializer_range
__magic_name__ : Union[str, Any] = scope
def SCREAMING_SNAKE_CASE ( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
def SCREAMING_SNAKE_CASE ( self ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a ):
__magic_name__ : List[str] = MobileNetVaModel(config=_a )
model.to(_a )
model.eval()
__magic_name__ : Optional[int] = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a ):
__magic_name__ : Any = self.num_labels
__magic_name__ : Dict = MobileNetVaForImageClassification(_a )
model.to(_a )
model.eval()
__magic_name__ : Any = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _snake_case ( snake_case , snake_case , unittest.TestCase ):
UpperCamelCase__ = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
UpperCamelCase__ = (
{'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[Any] = MobileNetVaModelTester(self )
__magic_name__ : List[str] = MobileNetVaConfigTester(self , config_class=_a , has_text_modality=_a )
def SCREAMING_SNAKE_CASE ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE ( self ):
pass
@unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
def SCREAMING_SNAKE_CASE ( self ):
pass
@unittest.skip(reason="MobileNetV1 does not output attentions" )
def SCREAMING_SNAKE_CASE ( self ):
pass
def SCREAMING_SNAKE_CASE ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def SCREAMING_SNAKE_CASE ( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 26
            self.assertEqual(len(hidden_states ) , expected_num_stages )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def SCREAMING_SNAKE_CASE ( self ):
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : List[str] = MobileNetVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ) -> int:
'''simple docstring'''
__magic_name__ : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self ):
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : str = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(_a )
__magic_name__ : Dict = self.default_image_processor
__magic_name__ : List[str] = prepare_img()
__magic_name__ : Optional[Any] = image_processor(images=_a , return_tensors="pt" ).to(_a )
# forward pass
with torch.no_grad():
__magic_name__ : Union[str, Any] = model(**_a )
# verify the logits
__magic_name__ : List[str] = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , _a )
__magic_name__ : Union[str, Any] = torch.tensor([-4.17_39, -1.12_33, 3.12_05] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 41 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case : int = logging.get_logger(__name__)
def get_mobilenet_va_config( model_name: str ) -> MobileNetVaConfig:
    '''simple docstring'''
    config = MobileNetVaConfig(layer_norm_eps=0.001 )
    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported." )
    matches = re.match(R"^mobilenet_v1_([^_]*)_([^_]*)$" , model_name )
    if matches:
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    idalabel = {int(k ) + 1: v for k, v in idalabel.items()}
    idalabel[0] = "background"
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    '''simple docstring'''
    config = get_mobilenet_va_config(model_name )
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model , config , checkpoint_path )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 32} , )
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    outputs = model(**encoding )
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1_739, -1.1_233, 3.1_205] )
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9_440, -2.3_141, -0.3_333] )
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing to the hub..." )
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
snake_case : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
snake_case : str = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
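# Example invocation (a sketch; the script filename and checkpoint path are
# assumptions, not taken from this file):
#   python convert_original_tf_checkpoint_to_pytorch.py --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt --pytorch_dump_folder_path ./mobilenet_v1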
| 41 | 1 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any]=13 , lowerCamelCase_ : List[str]=7 , lowerCamelCase_ : Any=True , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Dict=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : List[Any]=99 , lowerCamelCase_ : Dict=16 , lowerCamelCase_ : Dict=36 , lowerCamelCase_ : List[str]=6 , lowerCamelCase_ : List[str]=6 , lowerCamelCase_ : str=6 , lowerCamelCase_ : Union[str, Any]=37 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : Dict=0.1 , lowerCamelCase_ : List[Any]=5_12 , lowerCamelCase_ : List[Any]=16 , lowerCamelCase_ : Any=2 , lowerCamelCase_ : Dict=0.02 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : int=4 , lowerCamelCase_ : List[Any]=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : List[str] = seq_length
SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
SCREAMING_SNAKE_CASE : List[Any] = use_input_mask
SCREAMING_SNAKE_CASE : Any = use_token_type_ids
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE : str = embedding_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : Any = num_hidden_layers
SCREAMING_SNAKE_CASE : Any = num_hidden_groups
SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Union[str, Any] = type_vocab_size
SCREAMING_SNAKE_CASE : int = type_sequence_label_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Tuple = num_labels
SCREAMING_SNAKE_CASE : Any = num_choices
SCREAMING_SNAKE_CASE : Dict = scope
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Tuple = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : int = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = AlbertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = AlbertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , sentence_order_label=lowerCamelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = AlbertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = AlbertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Any , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.num_labels
SCREAMING_SNAKE_CASE : Optional[Any] = AlbertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : str = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : List[Any] = AlbertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.num_choices
SCREAMING_SNAKE_CASE : List[Any] = AlbertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : List[Any] = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
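        # Note: the 1e-4 tolerance on the 3x3 logit slice guards against minor
        # numerical drift across hardware; the reference values were presumably
        # recorded from a trusted run of the original checkpoint.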
| 323 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
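    # The three test_call_* methods above check the same contract for PIL, NumPy
    # and torch inputs: a single image gains a batch dimension of 1, and a list
    # of images is stacked along the batch axis.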
| 323 | 1 |
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    pattern = re.compile(r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
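# Reading the pattern left to right: an optional country prefix (0, 94, +94 or
# 0094), a mobile prefix 7x where x is 0-2 or 4-8, at most one separator
# ("-" or a space), then the remaining seven digits.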
| 369 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
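# With the lazy module in place, symbols listed in _import_structure resolve on
# first attribute access, so torch- or vision-only code is imported only when a
# matching backend is actually installed; the try/except blocks above simply
# skip registering submodules whose backend is unavailable.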
| 45 | 0 |
'''simple docstring'''
def euclidean_distance_sqr(pointa, pointb):
    return (pointa[0] - pointb[0]) ** 2 + (pointa[1] - pointb[1]) ** 2
def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])
def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    # brute-force comparison of every pair; used for the small base cases
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
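# The window of at most 6 neighbours above relies on the classic packing
# argument for closest-pair divide and conquer: inside the strip, only a
# constant number of points can be closer than the current minimum distance.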
def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # collect points within closest_pair_dis of the dividing vertical line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)
def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)) ** 0.5
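# Overall this is the O(n log n) scheme: sort once by x and once by y, recurse
# on the two halves, then scan the vertical strip around the split line. The
# recursion works on squared distances; the square root is taken once at the end.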
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("""Distance:""", closest_pair_of_points(points, len(points)))
| 75 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
a_ : Any = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
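# Hedged usage sketch (the checkpoint name is illustrative, not taken from this
# file; any CLAP-style zero-shot audio model should work):
#
#   from transformers import pipeline
#   classifier = pipeline(task="zero-shot-audio-classification",
#                         model="laion/clap-htsat-unfused")  # assumed checkpoint
#   classifier("dog_bark.wav", candidate_labels=["dog barking", "vacuum cleaner"])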
| 75 | 1 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
__lowerCamelCase : List[Any] = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
__lowerCamelCase : int = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def pt_to_pil(images):
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
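# Minimal usage sketch (shapes are assumptions): diffusion models typically emit
# tensors in [-1, 1] of shape (batch, channels, height, width). pt_to_pil
# rescales them to [0, 1], moves channels last, and hands off to numpy_to_pil,
# which returns a list of PIL.Image objects.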
| 286 |
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
if days_between_payments <= 0:
raise ValueError("days_between_payments must be > 0" )
if daily_interest_rate < 0:
raise ValueError("daily_interest_rate must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
return principal * daily_interest_rate * days_between_payments
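# Worked example (values are illustrative): principal=10_000, daily rate 0.0005
# and 30 days between payments give 10_000 * 0.0005 * 30 = 150.0 in simple interest.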
def compound_interest(principal: float, nominal_annual_interest_rate_percentage: float, number_of_compounding_periods: float) -> float:
if number_of_compounding_periods <= 0:
raise ValueError("number_of_compounding_periods must be > 0" )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("nominal_annual_interest_rate_percentage must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def apr_interest(principal: float, nominal_annual_percentage_rate: float, number_of_years: float) -> float:
if number_of_years <= 0:
raise ValueError("number_of_years must be > 0" )
if nominal_annual_percentage_rate < 0:
raise ValueError("nominal_annual_percentage_rate must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
    return compound_interest(principal, nominal_annual_percentage_rate / 365, number_of_years * 365)
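# apr_interest converts the nominal annual rate to a daily rate (rate / 365) and
# compounds it over number_of_years * 365 daily periods via compound_interest.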
if __name__ == "__main__":
import doctest
doctest.testmod()
| 286 | 1 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input, key_input):
        # outer product of per-token masks -> (batch, 1, query_len, key_len)
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
class TaLayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        # gated feed-forward: a GELU branch multiplied by a linear branch
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5-style RMS norm: scale only, no mean subtraction, computed in float32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    def forward(self, input):
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
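# FiLM (feature-wise linear modulation): the conditioning embedding is projected
# to a per-channel (scale, shift) pair and applied as x * (1 + scale) + shift,
# which is how the diffusion timestep steers every decoder layer above.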
| 178 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Union[str, Any] =logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] ={
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
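        # qkv_bias toggles the bias terms on the attention query/key/value
        # projections; the remaining fields mirror the standard ViT configuration.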
| 128 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__a :Tuple = logging.get_logger(__name__)
__a :Dict = {
'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
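# The two id lists above appear to be the default non-speech token sets for the
# English-only and multilingual Whisper checkpoints; generation suppresses these
# ids so decoding never emits them directly.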
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
 | 368 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Re-numbers the block index embedded in a checkpoint key and swaps the layer name."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
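# The loop above maps timm-style keys ("network.<block>.<layer>...") onto the
# HF PoolFormer layout; replace_key_with_offset re-numbers transformer blocks so
# that patch-embedding layers are counted separately from the encoder blocks.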
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original checkpoint's weights into the HF PoolFormer structure."""
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 329 | 0 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                # exclude the final highway, of course
                outputs = (sum(highway_losses[:-1]),) + outputs
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
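# Early-exit ("highway") heads attach a classifier to intermediate layers. At
# inference time the entropy of a highway's logits can trigger an early stop
# (raised as HighwayException), while at training time each highway contributes
# its own loss term to the total.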
| 313 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """
    Decodes raw audio bytes with ffmpeg into a 1-D float32 waveform at `sampling_rate`.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    """
    Streams raw microphone audio through ffmpeg in fixed-size byte chunks.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """
    Streams microphone audio as overlapping chunks ready for a speech pipeline.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
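# Each yielded item carries the raw chunk plus a (left, right) stride measured
# in samples; downstream speech pipelines use the stride to drop the overlapping
# context after decoding, keeping chunk boundaries seamless.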
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Reads raw bytes from `iterator` and yields overlapping chunks of `chunk_len`
    bytes with the requested (left, right) stride.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """
    Internal generator that reads `buflen`-byte blocks from an ffmpeg subprocess.
    """
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 313 | 1 |
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl
def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
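# get_signature collapses a model's parameters into a single scalar, which is a
# cheap way to check whether a save/load round trip actually restored the weights.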
class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)
    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)
    def test_env_var_device(self):
        """Tests that setting the torch device env var works"""
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")
    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
accelerator = Accelerator()
model, optimizer, scheduler, train_dl, valid_dl = create_components()
accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
model_signature = get_signature(model)
# saving hook
def save_config(models, weights, output_dir):
    config = {"class_name": models[0].__class__.__name__}
    with open(os.path.join(output_dir, "data.json"), "w") as f:
        json.dump(config, f)
# loading hook
def load_config(models, input_dir):
    with open(os.path.join(input_dir, "data.json"), "r") as f:
        config = json.load(f)
    models[0].class_name = config["class_name"]
save_hook = accelerator.register_save_state_pre_hook(save_config)
load_hook = accelerator.register_load_state_pre_hook(load_config)
with tempfile.TemporaryDirectory() as tmpdirname:
    accelerator.save_state(tmpdirname)
    # make sure random weights don't match with hooks
    load_random_weights(model)
    self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)
    # random class name to verify correct one is loaded
    model.class_name = "random"
    # make sure loaded weights match with hooks
    accelerator.load_state(tmpdirname)
    self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
    # model.class_name is loaded from config
    self.assertTrue(model.class_name == model.__class__.__name__)
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
    accelerator.save_state(tmpdirname)
    # make sure random weights don't match with hooks removed
    load_random_weights(model)
    self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)
    # random class name to verify correct one is loaded
    model.class_name = "random"
    # make sure loaded weights match with hooks removed
    accelerator.load_state(tmpdirname)
    self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
    # model.class_name is NOT loaded from config
    self.assertTrue(model.class_name != model.__class__.__name__)
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
accelerator = Accelerator()
model, optimizer, scheduler, train_dl, valid_dl = create_components()
dummy_obj = None
# This should work
model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
    model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
)
self.assertTrue(dummy_obj is None)
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
accelerator = Accelerator()
model, optimizer, scheduler, train_dl, valid_dl = create_components()
dummy_obj = [1, 2, 3]
# This should work
model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
    model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
)
self.assertEqual(
    getattr(dummy_obj, "_is_accelerate_prepared", False), False, "Dummy object should have `_is_accelerate_prepared` set to `False`", )
self.assertEqual(
    getattr(model, "_is_accelerate_prepared", False), True, "Model is missing `_is_accelerate_prepared` or is set to `False`", )
self.assertEqual(
    getattr(optimizer, "_is_accelerate_prepared", False), True, "Optimizer is missing `_is_accelerate_prepared` or is set to `False`", )
self.assertEqual(
    getattr(scheduler, "_is_accelerate_prepared", False), True, "Scheduler is missing `_is_accelerate_prepared` or is set to `False`", )
self.assertEqual(
    getattr(train_dl, "_is_accelerate_prepared", False), True, "Train Dataloader is missing `_is_accelerate_prepared` or is set to `False`", )
self.assertEqual(
    getattr(valid_dl, "_is_accelerate_prepared", False), True, "Valid Dataloader is missing `_is_accelerate_prepared` or is set to `False`", )
@slow
@require_bnb
def UpperCAmelCase_ ( self : Any ) -> int:
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained(
    "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map={"": 0}, )
accelerator = Accelerator()
# This should work
model = accelerator.prepare(model)
@slow
@require_bnb
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
from transformers import AutoModelForCausalLM
accelerator = Accelerator()
with init_empty_weights():
    model = AutoModelForCausalLM.from_pretrained(
        "EleutherAI/gpt-neo-125m", )
model.tie_weights()
device_map = infer_auto_device_map(model)
device_map["lm_head"] = "cpu"
model = AutoModelForCausalLM.from_pretrained(
    "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True)
# This should not work and get value error
with self.assertRaises(ValueError):
    model = accelerator.prepare(model)
@slow
@require_bnb
@require_multi_gpu
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
from transformers import AutoModelForCausalLM
PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
with init_empty_weights():
    model = AutoModelForCausalLM.from_pretrained(
        "EleutherAI/gpt-neo-125m", )
model.tie_weights()
device_map = infer_auto_device_map(model)
device_map["lm_head"] = 1
model = AutoModelForCausalLM.from_pretrained(
    "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map, )
accelerator = Accelerator()
# This should not work and get value error
with self.assertRaises(ValueError):
    model = accelerator.prepare(model)
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]:
from transformers import AutoModelForCausalLM
with init_empty_weights():
    model = AutoModelForCausalLM.from_pretrained(
        "EleutherAI/gpt-neo-125m", )
device_map = infer_auto_device_map(model)
device_map["lm_head"] = 1
model = AutoModelForCausalLM.from_pretrained(
    "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map, )
accelerator = Accelerator()
# This should work
model = accelerator.prepare(model)
@require_cuda
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
model = torch.nn.Linear(10, 10)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
accelerator = Accelerator(cpu=True)
_ = accelerator.prepare(model)
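# A minimal standalone sketch of the save-state pre-hook pattern the tests above exercise.
# This assumes a recent `accelerate` release; the `demo_*` names are illustrative, and the
# hook signature mirrors `save_config` above.
import json
import os
import tempfile
import torch

demo_accelerator = Accelerator()
demo_model = demo_accelerator.prepare(torch.nn.Linear(3, 4))

def demo_save_config(models, weights, output_dir):
    # persist extra metadata alongside the checkpoint files
    with open(os.path.join(output_dir, "data.json"), "w") as f:
        json.dump({"class_name": models[0].__class__.__name__}, f)

demo_hook = demo_accelerator.register_save_state_pre_hook(demo_save_config)
with tempfile.TemporaryDirectory() as tmpdirname:
    demo_accelerator.save_state(tmpdirname)
demo_hook.remove()  # the returned handle detaches the hook, as the test above verifies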
| 195 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_biogpt"] = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
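# Usage sketch for the lazy init above: with _LazyModule, the heavy modeling submodule is only
# imported when one of its names is first accessed (config values below are illustrative).
from transformers import BioGptConfig, BioGptForCausalLM  # resolved lazily on first access

config = BioGptConfig(hidden_size=256, num_hidden_layers=2, num_attention_heads=4)
model = BioGptForCausalLM(config)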
| 195 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
_import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_deit"] = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_deit"] = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 140 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
END_COMMON = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}
    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
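# Illustrative check of the pattern-based renaming defined above; the expected HF key follows
# from applying INIT_COMMON and then the decoder-specific patterns in order.
example_key = "pegasus/decoder/layer_0/attention/self/query/kernel"
assert rename_state_dict_key(example_key, DECODER_PATTERNS) == "model.decoder.layers.0.self_attn.q_proj.weight"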
| 140 | 1 |
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(fname):
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
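# Quick illustration of the two helpers above on synthetic inputs:
assert find_backend("    if not is_torch_available():") == "torch"
assert find_backend("    import os") is None
# analyze_results flags duplicates and mismatches between the two halves of an init:
print(analyze_results({"none": ["A", "A"]}, {"none": ["A"]}))  # reports a duplicate definition for "A"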
| 321 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "bert", "test_modeling_bert.py")
__SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Tuple = get_test_to_tester_mapping(__UpperCamelCase )
lowercase_ : Optional[int] = get_test_to_tester_mapping(__UpperCamelCase )
lowercase_ : List[str] = {'BertModelTest': 'BertModelTester'}
lowercase_ : Union[str, Any] = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = get_model_to_test_mapping(__UpperCamelCase )
lowercase_ : List[str] = get_model_to_test_mapping(__UpperCamelCase )
lowercase_ : Any = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
lowercase_ : Any = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = get_model_to_tester_mapping(__UpperCamelCase )
lowercase_ : Dict = get_model_to_tester_mapping(__UpperCamelCase )
lowercase_ : Tuple = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
lowercase_ : Optional[Any] = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
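# Illustrative standalone usage of the helper tested above (requires a transformers checkout
# on sys.path, as set up at the top of this file; the path mirrors the fixtures):
bert_test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
print(get_test_info.to_json(get_test_to_tester_mapping(bert_test_file)))
# expected: {"BertModelTest": "BertModelTester"}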
| 321 | 1 |
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| 96 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_ibert"] = [
"""IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""IBertForMaskedLM""",
"""IBertForMultipleChoice""",
"""IBertForQuestionAnswering""",
"""IBertForSequenceClassification""",
"""IBertForTokenClassification""",
"""IBertModel""",
"""IBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 96 | 1 |
# fmt: off
MORSE_CODE_DICT = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
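# Quick round-trip check of the two helpers above:
assert encrypt("SOS") == "... --- ..."
assert decrypt("... --- ...") == "SOS"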
if __name__ == "__main__":
main()
| 329 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a :Optional[Any] = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
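# Worked example of the offset logic above (hypothetical key; one patch embedding already
# counted, so offset=1):
assert replace_key_with_offset("2.3.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1") == "block.1.3.output.conv1.weight"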
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
"""simple docstring"""
A_ = PoolFormerConfig()
# set attributes based on model_name
A_ = "huggingface/label-files"
A_ = model_name[-3:]
A_ = 1000
A_ = "imagenet-1k-id2label.json"
A_ = (1, 1000)
# set config attributes
A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) )
A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
if size == "s12":
A_ = [2, 2, 6, 2]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 0.9
elif size == "s24":
A_ = [4, 4, 12, 4]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 0.9
elif size == "s36":
A_ = [6, 6, 18, 6]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 1E-6
A_ = 0.9
elif size == "m36":
A_ = [6, 6, 18, 6]
A_ = [96, 192, 384, 768]
A_ = 4.0
A_ = 1E-6
A_ = 0.95
elif size == "m48":
A_ = [8, 8, 24, 8]
A_ = [96, 192, 384, 768]
A_ = 4.0
A_ = 1E-6
A_ = 0.95
else:
raise ValueError(f'''Size {size} not supported''' )
# load image processor
A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase )
# Prepare image
A_ = prepare_img()
A_ = image_processor(images=__UpperCamelCase ,return_tensors="pt" ).pixel_values
logger.info(f'''Converting model {model_name}...''' )
# load original state dict
A_ = torch.load(__UpperCamelCase ,map_location=torch.device("cpu" ) )
# rename keys
A_ = rename_keys(__UpperCamelCase )
# create HuggingFace model and load state dict
A_ = PoolFormerForImageClassification(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
model.eval()
# Define image processor
A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase )
A_ = image_processor(images=prepare_img() ,return_tensors="pt" ).pixel_values
# forward pass
A_ = model(__UpperCamelCase )
A_ = outputs.logits
# define expected logit slices for different models
if size == "s12":
A_ = torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
A_ = torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
A_ = torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
A_ = torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
A_ = torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(f'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] ,__UpperCamelCase ,atol=1E-2 )
# finally, save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__a :int = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 329 | 1 |
"""simple docstring"""
import logging
from transformers import PretrainedConfig
a : Tuple = logging.getLogger(__name__)
a : Dict = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
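# Hypothetical instantiation of the config above (keyword names and defaults mirror the
# signature; only a few values overridden for illustration):
config = BertAbsConfig(vocab_size=30522, dec_layers=6, dec_hidden_size=768)
print(config.model_type)  # "bertabs"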
| 105 |
"""simple docstring"""
import os
def solution() -> int:
    """Returns the total of all the name scores in the file p022_names.txt."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
    names = names.replace('"', "").split(",")
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
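# Worked example from the problem statement: COLIN scores 3 + 15 + 12 + 9 + 14 = 53,
# and as the 938th name in the sorted list contributes 938 * 53 = 49714.
assert sum(ord(letter) - 64 for letter in "COLIN") == 53
assert 938 * 53 == 49714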
| 16 | 0 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class lowerCamelCase :
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=64 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = parent
UpperCAmelCase__ : int = batch_size
UpperCAmelCase__ : Union[str, Any] = seq_length
UpperCAmelCase__ : Optional[int] = is_training
UpperCAmelCase__ : Optional[int] = use_input_mask
UpperCAmelCase__ : int = use_token_type_ids
UpperCAmelCase__ : Dict = use_labels
UpperCAmelCase__ : Dict = vocab_size
UpperCAmelCase__ : Optional[int] = hidden_size
UpperCAmelCase__ : Union[str, Any] = num_hidden_layers
UpperCAmelCase__ : Optional[int] = num_attention_heads
UpperCAmelCase__ : str = intermediate_size
UpperCAmelCase__ : int = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Tuple = max_position_embeddings
UpperCAmelCase__ : List[str] = type_vocab_size
UpperCAmelCase__ : Dict = type_sequence_label_size
UpperCAmelCase__ : int = initializer_range
UpperCAmelCase__ : Tuple = num_labels
UpperCAmelCase__ : List[str] = num_choices
UpperCAmelCase__ : Union[str, Any] = scope
UpperCAmelCase__ : List[Any] = vocab_size - 1
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Dict = None
if self.use_input_mask:
UpperCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : Optional[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def _a (self ):
"""simple docstring"""
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase__ : str = True
return config, input_ids, input_mask, token_labels
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = GPTNeoXModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : Optional[int] = model(_lowerCamelCase , attention_mask=_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : str = True
UpperCAmelCase__ : Union[str, Any] = GPTNeoXModel(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : Tuple = model(_lowerCamelCase , attention_mask=_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = GPTNeoXForCausalLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = self.num_labels
UpperCAmelCase__ : Any = GPTNeoXForQuestionAnswering(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : Any = model(_lowerCamelCase , attention_mask=_lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = self.num_labels
UpperCAmelCase__ : Dict = GPTNeoXForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : str = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = self.num_labels
UpperCAmelCase__ : Dict = GPTNeoXForTokenClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : str = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : str = GPTNeoXForCausalLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
# first forward pass
UpperCAmelCase__ : List[Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , use_cache=_lowerCamelCase )
UpperCAmelCase__ : List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase__ : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase__ : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCAmelCase__ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase__ : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase__ : List[Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , output_hidden_states=_lowerCamelCase )
UpperCAmelCase__ : int = output_from_no_past["""hidden_states"""][0]
UpperCAmelCase__ : Union[str, Any] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase , output_hidden_states=_lowerCamelCase , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase__ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase__ : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase__ : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 ) )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Dict = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = config_and_inputs
UpperCAmelCase__ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (GPTNeoXForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Any = GPTNeoXModelTester(self )
UpperCAmelCase__ : int = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=64 , num_attention_heads=8 )
def _a (self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCAmelCase__ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def _a (self ):
"""simple docstring"""
pass
    @parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def test_model_rope_scaling(self , scaling_type ):
        """simple docstring"""
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"""type""": scaling_type, """factor""": 10.0}
        scaled_model = GPTNeoXModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1e-5 ) )
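# Background for the rope-scaling assertions above: linear scaling rescales the
# rotary position index for every input, so even the short input is expected to
# diverge from the unscaled model, while dynamic NTK scaling only enlarges the
# rotary base once a sequence exceeds `config.max_position_embeddings`, which is
# why the short-input outputs should match in the "dynamic" case only.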
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    @slow
    def test_pythia_410m_sample(self ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device )
            inputs = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(torch_device )
            # The hub repo was updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
            output_ids = model.generate(**inputs , do_sample=False , max_new_tokens=20 )
            output_str = tokenizer.batch_decode(output_ids )[0]
            self.assertEqual(output_str , expected_output )
| 166 |
"""simple docstring"""
from math import factorial, radians
def a__ ( angle_in_degrees , accuracy = 18 , rounded_values_count = 10 ) -> float:
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result , rounded_values_count )
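# Sanity check: the truncated Maclaurin series sin(x) = x - x**3/3! + x**5/5! - ...
# converges quickly for |x| <= 2*pi, so 18 terms are ample for 10 decimal places;
# for example a__(90.0) evaluates to 1.0 and a__(30.0) to 0.5.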
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 166 | 1 |
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class TokenizedDataset( IterableDataset ):
    def __init__( self , tokenizer , dataset , n_tasks=None , n_copies=1 ):
        """simple docstring"""
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__( self ):
        """simple docstring"""
        prompts = []
        for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
        outputs = self.tokenizer(prompts , padding=True , return_tensors="pt" )
        for task in range(self.n_tasks ):
            for _ in range(self.n_copies ):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria( StoppingCriteria ):
    def __init__( self , start_length , eof_strings , tokenizer ):
        """simple docstring"""
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
    def __call__( self , input_ids , scores , **kwargs ):
        """simple docstring"""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )
def remove_last_block( string ):
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS ) , string )
    # last string should be ""
    return "".join(string_list[:-2] )
def complete_code( accelerator , model , tokenizer , dataloader , n_tasks , batch_size=20 , **gen_kwargs ):
    gen_token_dict = defaultdict(list )  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader ) ):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model ).generate(
                input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=batch_size , **gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size )
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens , dim=1 , pad_index=tokenizer.pad_token_id )
            generated_tokens , generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks , generated_tokens ):
                gen_token_dict[task].append(generated_tokens )
    code_gens = [[] for _ in range(n_tasks )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s , skip_special_tokens=True , clean_up_tokenization_spaces=True )
            code_gens[task].append(remove_last_block(gen_code ) )
    return code_gens
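# Note: `pad_across_processes` right-pads every process's generations to a
# common length before `gather`, so the raw arrays in `gen_token_dict` can carry
# trailing pad tokens; `skip_special_tokens=True` in the decode above strips
# them again.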
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments )
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed , device_specific=True )
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , EOF_STRINGS , tokenizer )] ),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval" )
    code_eval_metric = load_metric("code_eval" )
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer , human_eval["test"] , n_copies=n_copies , n_tasks=n_tasks )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized , batch_size=1 )
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""] , predictions=[[""]] )
    except ValueError as exception:
        print(
            "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
            " flag to enable code evaluation." )
        raise exception
    model , human_eval_loader = accelerator.prepare(model , human_eval_loader )
    code_gens = complete_code(
        accelerator , model , tokenizer , human_eval_loader , n_tasks=n_tasks , batch_size=args.batch_size , **gen_kwargs , )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks ) ):
            test_func = human_eval["test"][task]["test"]
            entry_point = F'check({human_eval["test"][task]["entry_point"]})'
            references.append("\n" + test_func + "\n" + entry_point )
        # Evaluate completions with "code_eval" metric
        pass_at_k , _ = code_eval_metric.compute(
            references=references , predictions=code_gens , num_workers=args.num_workers )
        print(F'Results: {pass_at_k}' )
        # Save results to json file
        with open(args.output_file , "w" ) as fp:
            json.dump(pass_at_k , fp )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 223 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Tuple ={
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class a_ ( PretrainedConfig ):
__A = "autoformer"
__A = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
    def __init__( self , prediction_length: Optional[int] = None , context_length: Optional[int] = None , distribution_output: str = "student_t" , loss: str = "nll" , input_size: int = 1 , lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7] , scaling: bool = True , num_time_features: int = 0 , num_dynamic_real_features: int = 0 , num_static_categorical_features: int = 0 , num_static_real_features: int = 0 , cardinality: Optional[List[int]] = None , embedding_dimension: Optional[List[int]] = None , d_model: int = 64 , encoder_attention_heads: int = 2 , decoder_attention_heads: int = 2 , encoder_layers: int = 2 , decoder_layers: int = 2 , encoder_ffn_dim: int = 32 , decoder_ffn_dim: int = 32 , activation_function: str = "gelu" , dropout: float = 0.1 , encoder_layerdrop: float = 0.1 , decoder_layerdrop: float = 0.1 , attention_dropout: float = 0.1 , activation_dropout: float = 0.1 , num_parallel_samples: int = 100 , init_std: float = 0.02 , use_cache: bool = True , is_encoder_decoder=True , label_length: int = 10 , moving_average: int = 25 , autocorrelation_factor: int = 3 , **kwargs , ):
        """simple docstring"""
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def _number_of_features( self ) -> int:
        """simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
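# The `_number_of_features` property above counts the covariates concatenated
# onto each lagged value: static categorical embeddings, dynamic real features,
# time features, static real features, plus the two scale statistics
# log1p(|loc|) and log(scale); together with `input_size * len(lags_sequence)`
# this gives the `feature_size` set in the constructor.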
| 223 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
SCREAMING_SNAKE_CASE_:int = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser( subparsers=None ):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser("""tpu-config""" , description=_description )
    else:
        parser = argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        """Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" )
    config_args.add_argument(
        """--config_file""" , type=str , default=None , help="""Path to the config file to use for accelerate.""" , )
    config_args.add_argument(
        """--tpu_name""" , default=None , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , )
    config_args.add_argument(
        """--tpu_zone""" , default=None , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , )
    pod_args = parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" )
    pod_args.add_argument(
        """--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , )
    pod_args.add_argument(
        """--command_file""" , default=None , help="""The path to the file containing the commands to run on the pod on startup.""" , )
    pod_args.add_argument(
        """--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , )
    pod_args.add_argument(
        """--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , )
    pod_args.add_argument(
        """--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" , )
    pod_args.add_argument(
        """--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def tpu_command_launcher( args ):
    """simple docstring"""
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = '''git+https://github.com/huggingface/accelerate.git'''
    elif args.accelerate_version == "latest":
        args.accelerate_version = '''accelerate -U'''
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = f'''accelerate=={args.accelerate_version}'''
    if not args.command_file and not args.command:
        raise ValueError("""You must specify either a command file or a command to run on the pod.""" )
    if args.command_file:
        with open(args.command_file , """r""" ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['''cd /usr/share''']
    if args.install_accelerate:
        new_cmd += [f'''pip install {args.accelerate_version}''']
    new_cmd += args.command
    args.command = '''; '''.join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['''gcloud''']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f'''Running {" ".join(cmd )}''' )
        return
    subprocess.run(cmd )
    print("""Successfully setup pod.""" )
def main() -> None:
    """simple docstring"""
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
| 357 |
import os
from pathlib import Path
def __UpperCamelCase ( ) -> Any:
    """simple docstring"""
    from torch.utils.cpp_extension import load
    root = Path(__file__ ).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
    src_files = [
        root / filename
        for filename in [
            """vision.cpp""",
            os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ),
            os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ),
        ]
    ]
    load(
        """MultiScaleDeformableAttention""" , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[
        """-DCUDA_HAS_FP16=1""",
        """-D__CUDA_NO_HALF_OPERATORS__""",
        """-D__CUDA_NO_HALF_CONVERSIONS__""",
        """-D__CUDA_NO_HALF2_OPERATORS__""",
    ] , )
    import MultiScaleDeformableAttention as MSDA
    return MSDA
| 115 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase: str = logging.get_logger(__name__)
lowerCAmelCase: Optional[int] = {'vocab_file': 'sentencepiece.model'}
lowerCAmelCase: Any = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
lowerCAmelCase: Any = {
'google/rembert': 2_5_6,
}
class a__( PreTrainedTokenizer ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=True , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file )
@property
    def vocab_size( self ):
        return len(self.sp_model )
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
def __getstate__( self : Dict ):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self , __snake_case ):
        self.__dict__ = __snake_case
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text , sample=False ):
        pieces = self.sp_model.EncodeAsPieces(text )
        return pieces
    def _convert_token_to_id( self , token ):
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        out_string = self.sp_model.decode_pieces(tokens )
        return out_string
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix=None ):
        if not os.path.isdir(save_directory ):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCAmelCase: Dict = logging.get_logger(__name__)
lowerCAmelCase: str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase: List[Any] = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
lowerCAmelCase: str = {
'allenai/led-base-16384': 1_6_3_8_4,
}
class a__( PreTrainedTokenizerFast ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = LEDTokenizer
lowercase__ = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token( self ):
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ):
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.' )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.' )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix=None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _pad( self , encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding] , max_length: Optional[int] = None , padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask'] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs['global_attention_mask'] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
        return encoded_inputs
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 169 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
__snake_case = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast( tokenizer_name , checkpoint_name , dump_path , force_download ) -> None:
    '''simple docstring'''
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + 'Fast' )}
    logger.info(F'Loading tokenizer classes: {tokenizer_names}' )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
        for checkpoint in checkpoint_names:
            logger.info(F'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(F'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split('/' )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(F'=> File names {file_names}' )
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json' ):
                    os.remove(file_name )
                    logger.info(F'=> removing {file_name}' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
__snake_case = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 169 | 1 |
'''simple docstring'''
from math import log2
def snake_case_ ( _lowerCAmelCase : int ) -> int:
    if not isinstance(_lowerCAmelCase , int ):
        raise TypeError('''Input value must be a \'int\' type''' )
    elif _lowerCAmelCase < 0:
        raise ValueError('''Input value must be a positive integer''' )
    return 0 if (_lowerCAmelCase == 0) else int(log2(_lowerCAmelCase & -_lowerCAmelCase ) )
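# Example: for 36 (0b100100), 36 & -36 isolates the lowest set bit (4), and
# log2(4) == 2 is its zero-based index.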
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 303 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"vocab_file": "sentencepiece.bpe.model"}
UpperCAmelCase = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
UpperCAmelCase = {
"moussaKam/mbarthez": 1_024,
"moussaKam/barthez": 1_024,
"moussaKam/barthez-orangesum-title": 1_024,
}
UpperCAmelCase = "▁"
class UpperCAmelCase_ ( lowercase_):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs=None , **kwargs , ) -> None:
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        self.fairseq_offset = len(self.sp_model ) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size( self ) -> int:
        return len(self.sp_model )
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        return spm_id if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
def __getstate__( self : Dict ) -> Dict:
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , __UpperCamelCase ) -> None:
        self.__dict__ = __UpperCamelCase
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def save_vocabulary( self , save_directory , filename_prefix=None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 353 | """simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
UpperCAmelCase = """\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
"""
UpperCAmelCase = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""
UpperCAmelCase = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"pearson\": Pearson Correlation
\"spearmanr\": Spearman Correlation
\"matthews_correlation\": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'stsb')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})
{'pearson': 1.0, 'spearmanr': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'cola')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy( preds , labels ):
    return float((preds == labels).mean() )
def acc_and_fa( preds , labels ):
    acc = simple_accuracy(preds , labels )
    fa = float(fa_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def pearson_and_spearman( preds , labels ):
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
    def _info( self ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute( self , predictions , references ):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions , references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(predictions , references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions , references )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 54 | 0 |
"""simple docstring"""
def bead_sort(sequence ) -> list:
    '''simple docstring'''
    if any(not isinstance(x , int ) or x < 0 for x in sequence ):
        raise TypeError("""Sequence must be list of non-negative integers""" )
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
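# Each outer pass lets the surplus "beads" fall one rod to the right, so after
# len(sequence) passes every bead has settled and the rods are in non-decreasing
# order; like bubble sort this costs O(n^2) comparisons overall.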
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 136 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
lowercase__ = "gpt_neox"
    def __init__( self , vocab_size=50432 , hidden_size=6144 , num_hidden_layers=44 , num_attention_heads=64 , intermediate_size=24576 , hidden_act="gelu" , rotary_pct=0.25 , rotary_emb_base=10000 , attention_dropout=0.0 , hidden_dropout=0.0 , classifier_dropout=0.1 , max_position_embeddings=2048 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=0 , eos_token_id=2 , tie_word_embeddings=False , use_parallel_residual=True , rope_scaling=None , **kwargs , ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                """The hidden size is not divisible by the number of attention heads! Make sure to update them!""")
    def _rope_scaling_validation( self ):
        """simple docstring"""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
                F'''got {self.rope_scaling}''')
        rope_scaling_type = self.rope_scaling.get("""type""" , None)
        rope_scaling_factor = self.rope_scaling.get("""factor""" , None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''')
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''')
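# A `rope_scaling` value that passes the validation above would be, for example,
# {"type": "linear", "factor": 2.0}: the type must be "linear" or "dynamic" and
# the factor a float strictly greater than 1.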
| 136 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class a ( PretrainedConfig ):
_lowercase = "realm"
    def __init__( self , vocab_size=30522 , hidden_size=768 , retriever_proj_size=128 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , span_hidden_size=256 , max_span_width=10 , reader_layer_norm_eps=1e-3 , reader_beam_size=5 , reader_seq_len=320 , num_block_records=13353718 , searcher_beam_size=5000 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 189 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class a ( unittest.TestCase ):
    def setUp( self ):
        '''simple docstring'''
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()
    def get_image_processor( self , **A_ ):
        '''simple docstring'''
        return TvltImageProcessor.from_pretrained(self.checkpoint , **A_ )
    def get_feature_extractor( self , **A_ ):
        '''simple docstring'''
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **A_ )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , TvltFeatureExtractor )
        self.assertIsInstance(processor.image_processor , TvltImageProcessor )
    def test_feature_extractor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([12000] )
        audio_dict = feature_extractor(audio , return_tensors="np" )
        input_processor = processor(audio=audio , return_tensors="np" )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_image_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        images = np.ones([3, 224, 224] )
        image_dict = image_processor(images , return_tensors="np" )
        input_processor = processor(images=images , return_tensors="np" )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([12000] )
        images = np.ones([3, 224, 224] )
        inputs = processor(audio=audio , images=images )
        self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_model_input_names( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
| 189 | 1 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer: walk attribute names, re-joining with "_" on misses
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight: W = W0 + alpha * (up @ down)
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
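
# Hedged usage sketch (the script file name and all paths below are
# placeholders, not taken from this file):
#
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path ./lora.safetensors \
#       --dump_path ./merged-pipeline \
#       --alpha 0.75
#
# Each LoRA pair contributes W = W0 + alpha * (up @ down), which is what the
# weight update inside `convert` computes.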
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 331 | """simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """Holds the mean and std of the CLIP embedder, used to (un)normalize image embeddings."""

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        # normalize: subtract the learned mean and divide by the learned std
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        # invert `scale`: multiply by the std and add the mean back
        embeds = (embeds * self.std) + self.mean
        return embeds
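
# Round-trip sketch (illustrative only; the relative imports above mean this
# module is not runnable as a standalone script):
#
#   normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#   embeds = torch.randn(4, 768)
#   restored = normalizer.unscale(normalizer.scale(embeds))
#   # restored matches embeds up to floating-point error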
| 261 | 0 |
"""simple docstring"""
class Graph:
    def __init__(self):
        self.vertex = {}

    # for printing the graph vertices
    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    # for adding an edge between two vertices
    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
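

# For contrast, a minimal iterative DFS sketch using an explicit stack.
# It assumes the same `Graph` class defined above and is not part of the
# original recursive implementation.
def dfs_iterative(graph: Graph, start: int) -> list:
    visited = set()
    stack = [start]
    order = []
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.add(node)
            order.append(node)
            # push neighbours in reverse so they pop in insertion order
            stack.extend(reversed(graph.vertex.get(node, [])))
    return order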
| 326 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
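
# How the lazy pattern behaves in practice (illustrative; the exact import
# path depends on where this package lives inside transformers):
#
#   from transformers.models.deprecated.mmbt import MMBTConfig  # cheap: no torch import yet
#   from transformers.models.deprecated.mmbt import MMBTModel   # triggers the real modeling import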
| 326 | 1 |
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)
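

# Classical cross-check (not part of the original file): for definite inputs
# (0 or 1, i.e. no Hadamard superpositions) the measured counts should
# concentrate on the classical full-adder result computed below.
def classical_full_adder(a: int, b: int, c_in: int) -> tuple:
    total = a + b + c_in
    return total // 2, total % 2  # (carry_out, sum_bit)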
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 182 | import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]

            return outputs
@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
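

# End-to-end sketch of why an in-graph tokenizer matters (illustrative; the
# "gpt2" checkpoint is the one the tests above already use):
#
#   tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
#   model = ModelToSave(tokenizer=tf_tokenizer)
#   tf.saved_model.save(model, "exported", signatures={"serving_default": model.serving})
#   # The exported SavedModel maps raw strings directly to logits, with no
#   # Python-side preprocessing — the property test_saved_model checks.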
| 182 | 1 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    """Map a fairseq/audiocraft weight name onto the transformers naming scheme."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename all keys, split the fused qkv projection and pull out the enc-dec projection weights."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
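
# Hedged usage sketch (the script file name and the output directory are
# placeholders; invoke whatever file this code actually lives in):
#
#   python convert_musicgen_transformers.py --checkpoint small --pytorch_dump_folder ./musicgen-small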
| 368 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    # special case for the ForPreTraining model, which also needs a next-sentence label
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict

    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 300 | 0 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 159 |
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch the authenticated user's GitHub profile via the REST v3 API."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
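
# Usage note (illustrative; the script file name is an assumption): create a
# personal access token at https://github.com/settings/tokens, then run
#
#   USER_TOKEN=ghp_yourtoken python fetch_github_info.py
#
# The endpoint and header format above follow the GitHub REST v3 docs linked earlier.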
| 159 | 1 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 102 |
import math
def is_prime(number: int) -> bool:
    """Check whether ``number`` is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Grow the spiral until the prime ratio along its diagonals drops below ``ratio``."""
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
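

# Quick sanity checks for is_prime (not in the original file):
#   is_prime(2) -> True;  is_prime(3) -> True;  is_prime(25) -> False
#   is_prime(97) -> True; is_prime(1) -> False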
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102 | 1 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
from .vae_flax import FlaxAutoencoderKL
| 95 | """simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )

        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 177 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[Any] = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : str = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[int] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
a__ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 363 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a_ ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = StableDiffusionSAGPipeline
__SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
__SCREAMING_SNAKE_CASE : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : int = False
def __lowerCAmelCase ( self ) ->Optional[int]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE : int = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_lowerCamelCase , set_alpha_to_one=_lowerCamelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->str:
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = {
'''prompt''': '''.''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 1.0,
'''sag_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCAmelCase ( self ) ->Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
SCREAMING_SNAKE_CASE : Tuple = sag_pipe.to(_lowerCamelCase )
sag_pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = '''.'''
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = sag_pipe(
[prompt] , generator=_lowerCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
SCREAMING_SNAKE_CASE : int = output.images
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Union[str, Any] = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
SCREAMING_SNAKE_CASE : int = sag_pipe.to(_lowerCamelCase )
sag_pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = '''.'''
SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = sag_pipe(
[prompt] , generator=_lowerCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
SCREAMING_SNAKE_CASE : List[str] = output.images
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : str = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : int = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
SCREAMING_SNAKE_CASE : Optional[int] = sag_pipe.to(_lowerCamelCase )
sag_pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = '''.'''
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = sag_pipe(
[prompt] , width=768 , height=512 , generator=_lowerCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : List[Any] = output.images
assert image.shape == (1, 512, 768, 3)
| 19 | 0 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
UpperCAmelCase : Tuple =2_9979_2458
# Symbols
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase =symbols("""ct x y z""")
def _lowerCAmelCase (_lowerCAmelCase):
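    # beta = v / c: velocity expressed as a fraction of the speed of light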
if velocity > c:
raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError("Speed must be greater than or equal to 1!")
return velocity / c
def _lowerCAmelCase (_lowerCAmelCase):
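    # Lorentz factor: gamma = 1 / sqrt(1 - beta^2)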
return 1 / sqrt(1 - beta(_lowerCAmelCase) ** 2)
def _lowerCAmelCase (_lowerCAmelCase):
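    # 4x4 Lorentz boost matrix along the x-axis, acting on the four-vector (ct, x, y, z)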
return np.array(
[
[gamma(_lowerCAmelCase), -gamma(_lowerCAmelCase) * beta(_lowerCAmelCase), 0, 0],
[-gamma(_lowerCAmelCase) * beta(_lowerCAmelCase), gamma(_lowerCAmelCase), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
])
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase = None):
# Ensure event is not empty
if event is None:
UpperCamelCase_ = np.array([ct, x, y, z]) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(_lowerCAmelCase) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
UpperCAmelCase : Optional[Any] =transform(2997_9245)
print("""Example of four vector: """)
print(F"ct' = {four_vector[0]}")
print(F"x' = {four_vector[1]}")
print(F"y' = {four_vector[2]}")
print(F"z' = {four_vector[3]}")
# Substitute symbols with numerical values
UpperCAmelCase : List[Any] ={ct: c, x: 1, y: 1, z: 1}
UpperCAmelCase : Optional[Any] =[four_vector[i].subs(sub_dict) for i in range(4)]
print(F"\n{numerical_vector}")
| 128 |
class _lowercase :
'''simple docstring'''
def __init__( self , snake_case__ ):
'''simple docstring'''
UpperCamelCase_ = arr.split("," )
def _lowerCamelCase ( self ):
'''simple docstring'''
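        # Kadane-style dynamic programming: sum_value[i] holds the best subarray sum ending at i,
        # rear[i] holds the best sum seen anywhere up to index i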
UpperCamelCase_ = [int(self.array[0] )] * len(self.array )
UpperCamelCase_ = [int(self.array[0] )] * len(self.array )
for i in range(1 , len(self.array ) ):
UpperCamelCase_ = max(
int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
UpperCamelCase_ = max(sum_value[i] , rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
UpperCAmelCase : Tuple =input("""please input some numbers:""")
UpperCAmelCase : Optional[int] =SubArray(whole_array)
UpperCAmelCase : List[Any] =array.solve_sub_array()
    print(("""the result is:""", re))
| 128 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
class __snake_case (_a ):
def __init__( self : Optional[int] , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : List[Any] ) -> None:
'''simple docstring'''
warnings.warn(
"""The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DeiTImageProcessor instead.""" , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 364 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_lowerCamelCase : List[str] = logging.get_logger(__name__)
class __snake_case (_a ):
def __init__( self : Optional[Any] , *_UpperCAmelCase : str , **_UpperCAmelCase : Any ) -> None:
'''simple docstring'''
warnings.warn(
"""The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use GLPNImageProcessor instead.""" , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 159 | 0 |
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> List[str]:
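    # Selection sort: repeatedly swap the minimum of the unsorted suffix into place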
lowerCAmelCase = len(snake_case__ )
for i in range(length - 1 ):
lowerCAmelCase = i
for k in range(i + 1 , snake_case__ ):
if collection[k] < collection[least]:
lowerCAmelCase = k
if least != i:
lowerCAmelCase , lowerCAmelCase = (collection[i], collection[least])
return collection
if __name__ == "__main__":
lowercase__ : Optional[int] = input('''Enter numbers separated by a comma:\n''').strip()
lowercase__ : str = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
| 338 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowercase__ : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
lowercase__ : Dict = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
lowerCAmelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) )
lowerCAmelCase = self.transformer_dir
shutil.copy(
os.path.join(__SCREAMING_SNAKE_CASE , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
lowerCAmelCase = '''src/transformers'''
shutil.rmtree(self.transformer_dir )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Union[str, Any]:
lowerCAmelCase = comment + F"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
lowerCAmelCase = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result
lowerCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
lowerCAmelCase = black.format_str(__SCREAMING_SNAKE_CASE , mode=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = os.path.join(self.transformer_dir , '''new_code.py''' )
with open(__SCREAMING_SNAKE_CASE , '''w''' , newline='''\n''' ) as f:
f.write(__SCREAMING_SNAKE_CASE )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__SCREAMING_SNAKE_CASE ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f:
self.assertTrue(f.read() , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
lowerCAmelCase = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
# Base copy consistency
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , __SCREAMING_SNAKE_CASE , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , '''TestModel''' , __SCREAMING_SNAKE_CASE ) , )
# Copy consistency with a really long name
lowerCAmelCase = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , F"{long_class_name}LMPredictionHead" , re.sub('''Bert''' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , __SCREAMING_SNAKE_CASE , overwrite_result=re.sub('''Bert''' , '''TestModel''' , __SCREAMING_SNAKE_CASE ) , )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
lowerCAmelCase = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] )
self.assertFalse(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] )
        # Check that the converted list contains the same number of models as README.md.
self.assertTrue(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] )
# Check if the model link is synchronized.
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 338 | 1 |
def SCREAMING_SNAKE_CASE__ ( __a ):
if not isinstance(__a , __a ):
raise TypeError('only integers accepted as input' )
else:
snake_case_ : Any = str(abs(__a ) )
snake_case_ : List[Any] = [list(__a ) for char in range(len(__a ) )]
for index in range(len(__a ) ):
num_transpositions[index].pop(__a )
return max(
int(''.join(list(__a ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 88 |
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def SCREAMING_SNAKE_CASE__ ( __a , __a ):
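    # Build a k_size x k_size Gaussian kernel centred on the middle pixel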
snake_case_ : List[Any] = k_size // 2
    snake_case_ ,snake_case_ = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
snake_case_ : Optional[Any] = 1 / (2 * pi * sigma) * exp(-(square(__a ) + square(__a )) / (2 * square(__a )) )
return g
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a ):
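    # Convolve the image with a Gaussian kernel using an im2col-style sliding window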
    snake_case_ ,snake_case_ = image.shape[0], image.shape[1]
# dst image height and width
snake_case_ : int = height - k_size + 1
snake_case_ : Optional[int] = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
snake_case_ : Optional[Any] = zeros((dst_height * dst_width, k_size * k_size) )
snake_case_ : Tuple = 0
for i, j in product(range(__a ) , range(__a ) ):
snake_case_ : Optional[int] = ravel(image[i : i + k_size, j : j + k_size] )
snake_case_ : str = window
row += 1
# turn the kernel into shape(k*k, 1)
snake_case_ : List[Any] = gen_gaussian_kernel(__a , __a )
snake_case_ : str = ravel(__a )
# reshape and get the dst image
snake_case_ : Optional[int] = dot(__a , __a ).reshape(__a , __a ).astype(__a )
return dst
if __name__ == "__main__":
# read original image
_SCREAMING_SNAKE_CASE = imread(R"""../image_data/lena.jpg""")
# turn image in gray scale value
_SCREAMING_SNAKE_CASE = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
_SCREAMING_SNAKE_CASE = gaussian_filter(gray, 3, sigma=1)
_SCREAMING_SNAKE_CASE = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("""gaussian filter with 3x3 mask""", gaussianaxa)
imshow("""gaussian filter with 5x5 mask""", gaussianaxa)
waitKey()
| 88 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : Any = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def __lowerCamelCase ( A__ ) -> List[str]:
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
UpperCamelCase = k.replace(A__ , A__ )
if k.startswith('encoder' ):
UpperCamelCase = k.replace('.attn' , '.self_attn' )
UpperCamelCase = k.replace('norm1' , 'self_attn_layer_norm' )
UpperCamelCase = k.replace('norm2' , 'final_layer_norm' )
elif k.startswith('decoder' ):
UpperCamelCase = k.replace('norm1' , 'self_attn_layer_norm' )
UpperCamelCase = k.replace('norm2' , 'encoder_attn_layer_norm' )
UpperCamelCase = k.replace('norm3' , 'final_layer_norm' )
return k
def __lowerCamelCase ( A__ ) -> Any:
"""simple docstring"""
UpperCamelCase = [
'model.encoder.layernorm_embedding.weight',
'model.encoder.layernorm_embedding.bias',
'model.decoder.layernorm_embedding.weight',
'model.decoder.layernorm_embedding.bias',
]
for k in keys:
UpperCamelCase = sd.pop(A__ )
UpperCamelCase = k.replace('layernorm_embedding' , 'layer_norm' )
assert new_k not in sd
UpperCamelCase = v
_lowerCamelCase : Any = ["START"]
@torch.no_grad()
def __lowerCamelCase ( A__ , A__ , A__ ) -> int:
"""simple docstring"""
UpperCamelCase = torch.load(A__ , map_location='cpu' )
UpperCamelCase = model['model']
UpperCamelCase = BlenderbotConfig.from_json_file(A__ )
UpperCamelCase = BlenderbotForConditionalGeneration(A__ )
UpperCamelCase = m.model.state_dict().keys()
UpperCamelCase = []
UpperCamelCase = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
UpperCamelCase = rename_state_dict_key(A__ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
UpperCamelCase = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(A__ )
m.model.load_state_dict(A__ , strict=A__ )
m.half()
m.save_pretrained(A__ )
if __name__ == "__main__":
_lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
_lowerCamelCase : str = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 28 |
'''simple docstring'''
def A__ ( UpperCAmelCase_ ):
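    # Iteratively sum the decimal digits of n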
_UpperCamelCase : List[str] = abs(UpperCAmelCase_ )
_UpperCamelCase : int = 0
while n > 0:
res += n % 1_0
n //= 1_0
return res
def A__ ( UpperCAmelCase_ ):
_UpperCamelCase : List[Any] = abs(UpperCAmelCase_ )
return n if n < 1_0 else n % 1_0 + sum_of_digits(n // 1_0 )
def A__ ( UpperCAmelCase_ ):
return sum(int(UpperCAmelCase_ ) for c in str(abs(UpperCAmelCase_ ) ) )
def A__ ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(UpperCAmelCase_ , UpperCAmelCase_ ) -> None:
_UpperCamelCase : str = f'{func.__name__}({value})'
_UpperCamelCase : Tuple = timeit(f'__main__.{call}' , setup='import __main__' )
print(f'{call:56} = {func(UpperCAmelCase_ )} -- {timing:.4f} seconds' )
for value in (2_6_2_1_4_4, 1_1_2_5_8_9_9_9_0_6_8_4_2_6_2_4, 1_2_6_7_6_5_0_6_0_0_2_2_8_2_2_9_4_0_1_4_9_6_7_0_3_2_0_5_3_7_6):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(UpperCAmelCase_ , UpperCAmelCase_ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 83 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
__UpperCamelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
__UpperCamelCase = {
'''google/electra-small-generator''': 512,
'''google/electra-base-generator''': 512,
'''google/electra-large-generator''': 512,
'''google/electra-small-discriminator''': 512,
'''google/electra-base-discriminator''': 512,
'''google/electra-large-discriminator''': 512,
}
__UpperCamelCase = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ElectraTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=True, lowerCAmelCase__="[UNK]", lowerCAmelCase__="[SEP]", lowerCAmelCase__="[PAD]", lowerCAmelCase__="[CLS]", lowerCAmelCase__="[MASK]", lowerCAmelCase__=True, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Optional[Any]:
super().__init__(
lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, do_lower_case=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, tokenize_chinese_chars=lowerCAmelCase__, strip_accents=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('lowercase', lowerCAmelCase__) != do_lower_case
or normalizer_state.get('strip_accents', lowerCAmelCase__) != strip_accents
or normalizer_state.get('handle_chinese_chars', lowerCAmelCase__) != tokenize_chinese_chars
):
snake_case_ = getattr(lowerCAmelCase__, normalizer_state.pop('type'))
snake_case_ = do_lower_case
snake_case_ = strip_accents
snake_case_ = tokenize_chinese_chars
snake_case_ = normalizer_class(**lowerCAmelCase__)
snake_case_ = do_lower_case
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> List[Any]:
snake_case_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
snake_case_ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__)
return tuple(lowerCAmelCase__)
| 312 | """simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "trajectory_transformer"
SCREAMING_SNAKE_CASE_ = ["past_key_values"]
SCREAMING_SNAKE_CASE_ = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self, lowerCAmelCase__=100, lowerCAmelCase__=5, lowerCAmelCase__=1, lowerCAmelCase__=1, lowerCAmelCase__=249, lowerCAmelCase__=6, lowerCAmelCase__=17, lowerCAmelCase__=25, lowerCAmelCase__=4, lowerCAmelCase__=4, lowerCAmelCase__=128, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.0006, lowerCAmelCase__=512, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=1, lowerCAmelCase__=True, lowerCAmelCase__=1, lowerCAmelCase__=5_0256, lowerCAmelCase__=5_0256, **lowerCAmelCase__, ) -> Optional[Any]:
snake_case_ = vocab_size
snake_case_ = action_weight
snake_case_ = reward_weight
snake_case_ = value_weight
snake_case_ = max_position_embeddings
snake_case_ = block_size
snake_case_ = action_dim
snake_case_ = observation_dim
snake_case_ = transition_dim
snake_case_ = learning_rate
snake_case_ = n_layer
snake_case_ = n_head
snake_case_ = n_embd
snake_case_ = embd_pdrop
snake_case_ = attn_pdrop
snake_case_ = resid_pdrop
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = kaiming_initializer_range
snake_case_ = use_cache
super().__init__(pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__)
| 312 | 1 |
from __future__ import annotations
def _UpperCAmelCase (UpperCamelCase__ : list ):
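    # Arithmetic mean of a non-empty list of numbers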
if not nums:
raise ValueError("List is empty" )
return sum(UpperCamelCase__ ) / len(UpperCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
__lowerCamelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class A__ ( __snake_case ):
def __init__( self , A_ , A_ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=A_ , scheduler=A_ )
@torch.no_grad()
def __call__( self , A_ = 1 , A_ = 100 , A_ = None , A_ = None , A_ = True , ):
'''simple docstring'''
if audio_length_in_s is None:
UpperCamelCase : str = self.unet.config.sample_size / self.unet.config.sample_rate
UpperCamelCase : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate
UpperCamelCase : Any = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
F"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
F""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
UpperCamelCase : Union[str, Any] = int(A_ )
if sample_size % down_scale_factor != 0:
UpperCamelCase : List[str] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
F""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
" process." )
UpperCamelCase : Any = int(A_ )
UpperCamelCase : Union[str, Any] = next(iter(self.unet.parameters() ) ).dtype
UpperCamelCase : Optional[int] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(A_ , A_ ) and len(A_ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(A_ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCamelCase : Optional[Any] = randn_tensor(A_ , generator=A_ , device=self.device , dtype=A_ )
# set step values
self.scheduler.set_timesteps(A_ , device=audio.device )
UpperCamelCase : Optional[int] = self.scheduler.timesteps.to(A_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCamelCase : Dict = self.unet(A_ , A_ ).sample
# 2. compute previous image: x_t -> t_t-1
UpperCamelCase : int = self.scheduler.step(A_ , A_ , A_ ).prev_sample
UpperCamelCase : Optional[Any] = audio.clamp(-1 , 1 ).float().cpu().numpy()
UpperCamelCase : Dict = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=A_ )
| 52 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : Optional[Any] = "▁"
_lowercase : Any = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
_lowercase : Union[str, Any] = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
_lowercase : str = {"vinai/bartpho-syllable": 1_0_2_4}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ['input_ids', 'attention_mask']
def __init__( self : str, lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Optional[Any]="<s>", lowerCamelCase : List[Any]="</s>", lowerCamelCase : Optional[int]="</s>", lowerCamelCase : int="<s>", lowerCamelCase : List[Any]="<unk>", lowerCamelCase : Dict="<pad>", lowerCamelCase : Tuple="<mask>", lowerCamelCase : Optional[Dict[str, Any]] = None, **lowerCamelCase : Dict, )-> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
lowerCamelCase__ : str =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else mask_token
lowerCamelCase__ : int ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase, eos_token=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, cls_token=lowerCamelCase, pad_token=lowerCamelCase, mask_token=lowerCamelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase, )
lowerCamelCase__ : Optional[Any] =vocab_file
lowerCamelCase__ : Any =monolingual_vocab_file
lowerCamelCase__ : str =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowerCamelCase__ : Any ={}
lowerCamelCase__ : Optional[Any] =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
lowerCamelCase__ : Optional[int] =cnt
cnt += 1
with open(lowerCamelCase, '''r''', encoding='''utf-8''' ) as f:
for line in f.readlines():
lowerCamelCase__ : List[str] =line.strip().split()[0]
lowerCamelCase__ : Dict =len(self.fairseq_tokens_to_ids )
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
lowerCamelCase__ : Tuple =len(self.fairseq_tokens_to_ids )
lowerCamelCase__ : str ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : List[str] )-> Optional[int]:
lowerCamelCase__ : Optional[Any] =self.__dict__.copy()
lowerCamelCase__ : Optional[int] =None
lowerCamelCase__ : int =self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[Any], lowerCamelCase : Tuple )-> Any:
lowerCamelCase__ : Optional[int] =d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
lowerCamelCase__ : str ={}
lowerCamelCase__ : str =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def snake_case ( self : Dict, lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None )-> List[int]:
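        # Single sequence: <s> X </s>; pair of sequences: <s> A </s></s> B </s>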
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ : Optional[Any] =[self.cls_token_id]
lowerCamelCase__ : Optional[int] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case ( self : Optional[Any], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None, lowerCamelCase : bool = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase, token_ids_a=lowerCamelCase, already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def snake_case ( self : Dict, lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None )-> List[int]:
lowerCamelCase__ : Optional[Any] =[self.sep_token_id]
lowerCamelCase__ : List[Any] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case ( self : int )-> Optional[int]:
return len(self.fairseq_ids_to_tokens )
def snake_case ( self : Optional[int] )-> Optional[int]:
lowerCamelCase__ : Optional[Any] ={self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case ( self : Optional[int], lowerCamelCase : str )-> List[str]:
return self.sp_model.encode(lowerCamelCase, out_type=lowerCamelCase )
def snake_case ( self : int, lowerCamelCase : Optional[Any] )-> List[str]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def snake_case ( self : Any, lowerCamelCase : Optional[int] )-> Optional[int]:
return self.fairseq_ids_to_tokens[index]
def snake_case ( self : int, lowerCamelCase : Any )-> Optional[int]:
lowerCamelCase__ : List[str] =''''''.join(lowerCamelCase ).replace(lowerCamelCase, ''' ''' ).strip()
return out_string
def snake_case ( self : Union[str, Any], lowerCamelCase : str, lowerCamelCase : Optional[str] = None )-> Tuple[str]:
if not os.path.isdir(lowerCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ : Dict =os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase__ : Tuple =os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''], )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase, '''wb''' ) as fi:
lowerCamelCase__ : Any =self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file, lowerCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowerCamelCase, '''w''', encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F'''{str(lowerCamelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 272 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def snake_case__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : Any ={
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
lowerCamelCase__ : Optional[Any] ={
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
lowerCamelCase__ : Any =f'''{src_lang}-{tgt_lang}'''
lowerCamelCase__ : Any =f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the one reported in the paper, as the researchers don\'t use `sacrebleu` and instead measure the score on tokenized outputs. The `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=__lowerCamelCase , exist_ok=__lowerCamelCase )
lowerCamelCase__ : str =os.path.join(__lowerCamelCase , '''README.md''' )
print(f'''Generating {path}''' )
with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(__lowerCamelCase )
# make sure we are under the root of the project
_lowercase : List[str] = Path(__file__).resolve().parent.parent.parent
_lowercase : Dict = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
_lowercase : int = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 272 | 1 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( __snake_case : int ):
'''simple docstring'''
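    # Count how many times the digits must be multiplied together before a single digit remains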
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError('multiplicative_persistence() only accepts integral values' )
if num < 0:
raise ValueError('multiplicative_persistence() does not accept negative values' )
lowercase = 0
lowercase = str(SCREAMING_SNAKE_CASE_ )
while len(SCREAMING_SNAKE_CASE_ ) != 1:
lowercase = [int(SCREAMING_SNAKE_CASE_ ) for i in num_string]
lowercase = 1
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
total *= numbers[i]
lowercase = str(SCREAMING_SNAKE_CASE_ )
steps += 1
return steps
def _SCREAMING_SNAKE_CASE ( __snake_case : int ):
'''simple docstring'''
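    # Count how many times the digits must be summed before a single digit remains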
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError('additive_persistence() only accepts integral values' )
if num < 0:
raise ValueError('additive_persistence() does not accept negative values' )
lowercase = 0
lowercase = str(SCREAMING_SNAKE_CASE_ )
while len(SCREAMING_SNAKE_CASE_ ) != 1:
lowercase = [int(SCREAMING_SNAKE_CASE_ ) for i in num_string]
lowercase = 0
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
total += numbers[i]
lowercase = str(SCREAMING_SNAKE_CASE_ )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 220 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
snake_case_ = datasets.utils.logging.get_logger(__name__)
snake_case_ = ['''names''', '''prefix''']
snake_case_ = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
snake_case_ = ['''encoding_errors''', '''on_bad_lines''']
snake_case_ = ['''date_format''']
@dataclass
class SCREAMING_SNAKE_CASE__ (datasets.BuilderConfig ):
__lowerCamelCase : str = ","
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[Union[int, List[int], str]] = "infer"
__lowerCamelCase : Optional[List[str]] = None
__lowerCamelCase : Optional[List[str]] = None
__lowerCamelCase : Optional[Union[int, str, List[int], List[str]]] = None
__lowerCamelCase : Optional[Union[List[int], List[str]]] = None
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : bool = True
__lowerCamelCase : Optional[Literal["c", "python", "pyarrow"]] = None
__lowerCamelCase : Dict[Union[int, str], Callable[[Any], Any]] = None
__lowerCamelCase : Optional[list] = None
__lowerCamelCase : Optional[list] = None
__lowerCamelCase : bool = False
__lowerCamelCase : Optional[Union[int, List[int]]] = None
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : Optional[Union[str, List[str]]] = None
__lowerCamelCase : bool = True
__lowerCamelCase : bool = True
__lowerCamelCase : bool = False
__lowerCamelCase : bool = True
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : str = "."
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : str = '"'
__lowerCamelCase : int = 0
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : bool = True
__lowerCamelCase : bool = True
__lowerCamelCase : int = 0
__lowerCamelCase : bool = True
__lowerCamelCase : bool = False
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : int = 1_0000
__lowerCamelCase : Optional[datasets.Features] = None
__lowerCamelCase : Optional[str] = "strict"
__lowerCamelCase : Literal["error", "warn", "skip"] = "error"
__lowerCamelCase : Optional[str] = None
def snake_case_ ( self):
if self.delimiter is not None:
lowercase__ : List[Any] = self.delimiter
if self.column_names is not None:
lowercase__ : Optional[int] = self.column_names
@property
def snake_case_ ( self):
lowercase__ : Dict = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we also cannot pass them if they are set to the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , a):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class SCREAMING_SNAKE_CASE__ (datasets.ArrowBasedBuilder ):
__lowerCamelCase : Optional[Any] = CsvConfig
def snake_case_ ( self):
return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits
    def _cast_table(self, pa_table):
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                raise
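
# Usage sketch (illustrative addition, not part of the original module): this builder is what
# `datasets.load_dataset("csv", ...)` dispatches to, and any CsvConfig field can be passed
# through as a keyword argument. The file name below is a hypothetical placeholder.
#
#   import datasets
#   ds = datasets.load_dataset("csv", data_files={"train": "train.csv"}, sep=";", skiprows=1)
#   print(ds["train"][0])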
| 214 | 0 |
"""simple docstring"""
import os
import pytest
from attr import dataclass
_A = """us-east-1""" # defaults region
@dataclass
class lowerCamelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
SCREAMING_SNAKE_CASE = {
'task_name': 'mnli',
'per_device_train_batch_size': 1_6,
'per_device_eval_batch_size': 1_6,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_0_0,
'save_steps': 5_5_0_0,
}
SCREAMING_SNAKE_CASE = {**hyperparameters, 'max_steps': 1_0_0_0}
@property
def _a (self ):
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def _a (self ):
"""simple docstring"""
return F"""{self.framework}-transfromers-test"""
@property
def _a (self ):
"""simple docstring"""
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def _a (self ):
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def a__ ( lowerCAmelCase ) -> Tuple:
UpperCAmelCase__ : Union[str, Any] = SageMakerTestEnvironment(framework=request.cls.framework )
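
# Usage sketch (illustrative addition): a test module opts in to this fixture by name, e.g.
#
#   @pytest.mark.usefixtures("sm_env")
#   class MyTrainingTest(unittest.TestCase):
#       framework = "pytorch"  # consumed by the fixture via request.cls.framework
#
#       def test_image(self):
#           print(self.env.image_uri)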
| 166 |
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
_A = 6_378_137.0
_A = 6_356_752.314_245
_A = 6_37_81_37
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> float:
UpperCAmelCase__ : Union[str, Any] = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
UpperCAmelCase__ : List[str] = atan((1 - flattening) * tan(radians(lowerCAmelCase ) ) )
UpperCAmelCase__ : str = atan((1 - flattening) * tan(radians(lowerCAmelCase ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
UpperCAmelCase__ : Any = haversine_distance(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
UpperCAmelCase__ : int = (b_lata + b_lata) / 2
UpperCAmelCase__ : Any = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
UpperCAmelCase__ : Optional[Any] = (sin(lowerCAmelCase ) ** 2) * (cos(lowerCAmelCase ) ** 2)
UpperCAmelCase__ : Optional[Any] = cos(sigma / 2 ) ** 2
UpperCAmelCase__ : Optional[int] = (sigma - sin(lowerCAmelCase )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
UpperCAmelCase__ : Optional[int] = (cos(lowerCAmelCase ) ** 2) * (sin(lowerCAmelCase ) ** 2)
UpperCAmelCase__ : str = sin(sigma / 2 ) ** 2
UpperCAmelCase__ : Union[str, Any] = (sigma + sin(lowerCAmelCase )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
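
# Sanity-check sketch (illustrative addition): for points that are not near-antipodal,
# Lambert's ellipsoidal formula should land close to the spherical haversine result.
# Coordinates below are approximate values for San Francisco and Yosemite.
#
#   sf = (37.774856, -122.424227)
#   yosemite = (37.864742, -119.537521)
#   d_lambert = lamberts_ellipsoidal_distance(*sf, *yosemite)
#   d_haversine = haversine_distance(*sf, *yosemite)
#   assert abs(d_lambert - d_haversine) / d_haversine < 0.01  # within ~1%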
| 166 | 1 |
import subprocess
import sys

from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch


class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # python one-liner segments
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = "\nfrom transformers import pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = "\nfrom transformers import AutoModel\n "
        run = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
| 95 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
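
# Usage sketch (illustrative addition): the integration check above mirrors plain inference,
# which looks roughly like this outside the test harness:
#
#   from transformers import FlaubertModel, FlaubertTokenizer
#   tok = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
#   model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
#   hidden = model(**tok("Bonjour le monde", return_tensors="pt")).last_hidden_state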
| 154 | 0 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
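
# Invocation sketch (illustrative addition; the checkpoint path and output directory below are
# hypothetical placeholders, and the script name follows the diffusers repository convention):
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --extract_ema \
#       --dump_path ./stable-diffusion-v1-5-diffusers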
| 189 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
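
# Usage sketch (illustrative addition; the image file below is a hypothetical placeholder):
#
#   from PIL import Image
#   from transformers import OwlViTProcessor
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   image = Image.open("cats.png")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
#   print(inputs.keys())  # input_ids, attention_mask, pixel_values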
| 189 | 1 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
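
# Usage sketch (illustrative addition):
#
#   import numpy as np
#   fe = TvltFeatureExtractor()
#   wav = np.random.randn(44100)  # one second of fake audio at the default sampling rate
#   out = fe(wav, sampling_rate=44100, return_tensors="np")
#   print(out["audio_values"].shape, out["audio_mask"].shape)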
| 89 |
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = """this is a test"""
        output_text = """this is a test"""
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = """<pad>"""
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], """<pad>""")
        self.assertEqual(vocab_keys[1], """<unk>""")
        self.assertEqual(vocab_keys[-1], """▁eloquent""")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = """I was born in 92000, and this is falsé."""

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("""This is a test""")
        self.assertListEqual(tokens, ["""▁this""", """▁is""", """▁a""", """▁test"""])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            tokens, ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("""sequence builders""")
        text_2 = tokenizer.encode("""multi-sequence build""")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
@slow
    def test_tokenizer_integration(self):
# fmt: off
UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase, model_name="""albert-base-v2""", revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""",
        )
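
# Usage sketch (illustrative addition): the round trip these tests exercise, outside the harness:
#
#   from transformers import AlbertTokenizer
#   tok = AlbertTokenizer.from_pretrained("albert-base-v2")
#   ids = tok.encode("sequence builders")
#   print(tok.decode(ids))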
| 265 | 0 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 1500000) -> int:
    # frequencies[p] counts how many distinct right triangles have perimeter p
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
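
# Worked micro-example (illustrative addition): Euclid's formula with m=2, n=1 gives the
# primitive triple (3, 4, 5), whose perimeter is 2*m*(m+n) = 12; every multiple of 12 up to
# the limit then has its counter bumped, and perimeters counted exactly once form the answer.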
if __name__ == "__main__":
print(F"""{solution() = }""") | 352 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training')
            if hparams.sortish_sampler:
                raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously')

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, 'summarization')
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / 'metrics.json'
        self.hparams_save_path = Path(self.output_dir) / 'hparams.pkl'
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            'train': self.hparams.n_train,
            'val': self.hparams.n_val,
            'test': self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            'train': self.hparams.max_target_length,
            'val': self.hparams.val_max_target_length,
            'test': self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], F'target_lens: {self.target_lens}'
        assert self.target_lens["train"] <= self.target_lens["test"], F'target_lens: {self.target_lens}'
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()['repo_sha']
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer, 'prepare_seq2seq_batch') else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility"""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if 'mask' not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / 'text_batch.json')
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / 'tok_batch.json')

        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)
    def _step(self, batch: dict):
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch['input_ids'], batch['attention_mask']
        tgt_ids = batch['labels']
        if isinstance(self.model, TaForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch['decoder_input_ids'] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs['logits']
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
@property
def A_ ( self : Dict ):
return self.tokenizer.pad_token_id
def A_ ( self : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] ):
SCREAMING_SNAKE_CASE__ = self._step(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = dict(zip(self.loss_names , UpperCAmelCase_ ) )
# tokens per batch
SCREAMING_SNAKE_CASE__ = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
SCREAMING_SNAKE_CASE__ = batch['input_ids'].shape[0]
SCREAMING_SNAKE_CASE__ = batch['input_ids'].eq(self.pad ).sum()
SCREAMING_SNAKE_CASE__ = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def A_ ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple ):
return self._generative_step(UpperCAmelCase_ )
def A_ ( self : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple="val" ):
self.step_count += 1
SCREAMING_SNAKE_CASE__ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
SCREAMING_SNAKE_CASE__ = losses['loss']
SCREAMING_SNAKE_CASE__ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
SCREAMING_SNAKE_CASE__ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
SCREAMING_SNAKE_CASE__ = torch.tensor(UpperCAmelCase_ ).type_as(UpperCAmelCase_ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = {F'{prefix}_avg_{k}': x for k, x in losses.items()}
SCREAMING_SNAKE_CASE__ = self.step_count
self.metrics[prefix].append(UpperCAmelCase_ ) # callback writes this to self.metrics_save_path
SCREAMING_SNAKE_CASE__ = flatten_list([x['preds'] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'{prefix}_loss': loss,
F'{prefix}_{self.val_metric}': metric_tensor,
}
def A_ ( self : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ):
return calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : str , UpperCAmelCase_ : dict ):
SCREAMING_SNAKE_CASE__ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
SCREAMING_SNAKE_CASE__ = self.model.generate(
batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=UpperCAmelCase_ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
SCREAMING_SNAKE_CASE__ = (time.time() - ta) / batch['input_ids'].shape[0]
SCREAMING_SNAKE_CASE__ = self.ids_to_clean_text(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.ids_to_clean_text(batch['labels'] )
SCREAMING_SNAKE_CASE__ = self._step(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = dict(zip(self.loss_names , UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ = self.calc_generative_metrics(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = np.mean(lmap(UpperCAmelCase_ , UpperCAmelCase_ ) )
base_metrics.update(gen_time=UpperCAmelCase_ , gen_len=UpperCAmelCase_ , preds=UpperCAmelCase_ , target=UpperCAmelCase_ , **UpperCAmelCase_ )
return base_metrics
def A_ ( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int ):
return self._generative_step(UpperCAmelCase_ )
def A_ ( self : Any , UpperCAmelCase_ : List[str] ):
return self.validation_epoch_end(UpperCAmelCase_ , prefix='test' )
def A_ ( self : Optional[int] , UpperCAmelCase_ : List[Any] ):
SCREAMING_SNAKE_CASE__ = self.n_obs[type_path]
SCREAMING_SNAKE_CASE__ = self.target_lens[type_path]
SCREAMING_SNAKE_CASE__ = self.dataset_class(
self.tokenizer , type_path=UpperCAmelCase_ , n_obs=UpperCAmelCase_ , max_target_length=UpperCAmelCase_ , **self.dataset_kwargs , )
return dataset
def A_ ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : bool = False ):
SCREAMING_SNAKE_CASE__ = self.get_dataset(UpperCAmelCase_ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE__ = dataset.make_sortish_sampler(UpperCAmelCase_ , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCAmelCase_ , batch_size=UpperCAmelCase_ , collate_fn=dataset.collate_fn , shuffle=UpperCAmelCase_ , num_workers=self.num_workers , sampler=UpperCAmelCase_ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE__ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCAmelCase_ , batch_sampler=UpperCAmelCase_ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
UpperCAmelCase_ , batch_size=UpperCAmelCase_ , collate_fn=dataset.collate_fn , shuffle=UpperCAmelCase_ , num_workers=self.num_workers , sampler=UpperCAmelCase_ , )
def A_ ( self : Any ):
SCREAMING_SNAKE_CASE__ = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=UpperCAmelCase_ )
return dataloader
def A_ ( self : str ):
return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )
def A_ ( self : int ):
return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def A_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ):
BaseTransformer.add_model_specific_args(UpperCAmelCase_ , UpperCAmelCase_ )
add_generic_args(UpperCAmelCase_ , UpperCAmelCase_ )
parser.add_argument(
'--max_source_length' , default=1024 , type=UpperCAmelCase_ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
        parser.add_argument(
            '--max_target_length' , default=56 , type=UpperCAmelCase_ , help=(
                'The maximum total target sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--val_max_target_length' , default=142 , type=UpperCAmelCase_ , help=(
                'The maximum total target sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--test_max_target_length' , default=142 , type=UpperCAmelCase_ , help=(
                'The maximum total target sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
parser.add_argument('--freeze_encoder' , action='store_true' )
parser.add_argument('--freeze_embeds' , action='store_true' )
parser.add_argument('--sortish_sampler' , action='store_true' , default=UpperCAmelCase_ )
parser.add_argument('--overwrite_output_dir' , action='store_true' , default=UpperCAmelCase_ )
parser.add_argument('--max_tokens_per_batch' , type=UpperCAmelCase_ , default=UpperCAmelCase_ )
parser.add_argument('--logger_name' , type=UpperCAmelCase_ , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
parser.add_argument('--n_train' , type=UpperCAmelCase_ , default=-1 , required=UpperCAmelCase_ , help='# examples. -1 means use all.' )
parser.add_argument('--n_val' , type=UpperCAmelCase_ , default=500 , required=UpperCAmelCase_ , help='# examples. -1 means use all.' )
parser.add_argument('--n_test' , type=UpperCAmelCase_ , default=-1 , required=UpperCAmelCase_ , help='# examples. -1 means use all.' )
parser.add_argument(
            '--task' , type=UpperCAmelCase_ , default='summarization' , required=UpperCAmelCase_ , help='task to run, e.g. summarization or translation' )
parser.add_argument('--label_smoothing' , type=UpperCAmelCase_ , default=0.0 , required=UpperCAmelCase_ )
parser.add_argument('--src_lang' , type=UpperCAmelCase_ , default='' , required=UpperCAmelCase_ )
parser.add_argument('--tgt_lang' , type=UpperCAmelCase_ , default='' , required=UpperCAmelCase_ )
parser.add_argument('--eval_beams' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , required=UpperCAmelCase_ )
parser.add_argument(
'--val_metric' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , required=UpperCAmelCase_ , choices=['bleu', 'rouge2', 'loss', None] )
parser.add_argument('--eval_max_gen_length' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='never generate more than n tokens' )
parser.add_argument('--save_top_k' , type=UpperCAmelCase_ , default=1 , required=UpperCAmelCase_ , help='How many checkpoints to save' )
parser.add_argument(
'--early_stopping_patience' , type=UpperCAmelCase_ , default=-1 , required=UpperCAmelCase_ , help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
                ' val_check_interval will affect it.'
) , )
return parser
class lowercase__ ( _UpperCAmelCase ):
A__ : Optional[Any] ="""translation"""
A__ : Dict =["""loss"""]
A__ : Optional[int] =["""bleu"""]
A__ : Union[str, Any] ="""bleu"""
def __init__( self : Tuple , UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ):
super().__init__(UpperCAmelCase_ , **UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = hparams.src_lang
SCREAMING_SNAKE_CASE__ = hparams.tgt_lang
def A_ ( self : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict ):
return calculate_bleu(UpperCAmelCase_ , UpperCAmelCase_ )
def _lowercase ( UpperCamelCase_ , UpperCamelCase_=None ) -> SummarizationModule:
'''simple docstring'''
Path(args.output_dir ).mkdir(exist_ok=UpperCamelCase_ )
check_output_dir(UpperCamelCase_ , expected_items=3 )
if model is None:
if "summarization" in args.task:
SCREAMING_SNAKE_CASE__ = SummarizationModule(UpperCamelCase_ )
else:
SCREAMING_SNAKE_CASE__ = TranslationModule(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('/tmp' )
or str(args.output_dir ).startswith('/var' )
):
SCREAMING_SNAKE_CASE__ = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE__ = os.environ.get('WANDB_PROJECT' , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = WandbLogger(name=model.output_dir.name , project=UpperCamelCase_ )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE__ = WandbLogger(name=model.output_dir.name , project=F'hf_{dataset}' )
if args.early_stopping_patience >= 0:
SCREAMING_SNAKE_CASE__ = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = args.val_metric == 'loss'
SCREAMING_SNAKE_CASE__ = generic_train(
UpperCamelCase_ , UpperCamelCase_ , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , UpperCamelCase_ ) , early_stopping_callback=UpperCamelCase_ , logger=UpperCamelCase_ , )
pickle_save(model.hparams , model.output_dir / 'hparams.pkl' )
if not args.do_predict:
return model
SCREAMING_SNAKE_CASE__ = ''
SCREAMING_SNAKE_CASE__ = sorted(glob.glob(os.path.join(args.output_dir , '*.ckpt' ) , recursive=UpperCamelCase_ ) )
if checkpoints:
SCREAMING_SNAKE_CASE__ = checkpoints[-1]
SCREAMING_SNAKE_CASE__ = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
__snake_case = pl.Trainer.add_argparse_args(parser)
__snake_case = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__snake_case = parser.parse_args()
main(args)
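# Hypothetical invocation (paths, model, and hyperparameter values below are
# illustrative assumptions, not taken from this file):
#   python finetune.py --data_dir=xsum --model_name_or_path=facebook/bart-large \
#       --output_dir=tmp_finetune --n_val 500 --val_metric rouge2 --do_predict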
| 169 | 0 |
"""simple docstring"""
import argparse
import os
import re
lowerCAmelCase : int = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
lowerCAmelCase : Any = re.compile(r"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
lowerCAmelCase : int = re.compile(r"""\s*\(\s*\"(\S[^\"]+)\"""")
def a__ ( snake_case__ , snake_case__ = False ) -> Optional[Any]:
with open(lowercase_ , """r""" , encoding="""utf-8""" ) as f:
lowerCamelCase = f.read()
lowerCamelCase = content.split("""\n""" )
lowerCamelCase = []
lowerCamelCase = 0
while line_idx < len(lowercase_ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
lowerCamelCase = len(re.search(R"""^(\s*)\S""" , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(""" """ * indent + """(""" ):
new_lines.append(lines[line_idx] )
line_idx += 1
lowerCamelCase = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
lowerCamelCase = line_idx
while not lines[line_idx].startswith(""" """ * indent + """)""" ):
line_idx += 1
blocks.append("""\n""".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
            lowerCamelCase = sorted(lowercase_ , key=lambda snake_case__ : _re_identifier.search(snake_case__ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowercase_ , """w""" , encoding="""utf-8""" ) as f:
f.write("""\n""".join(lowercase_ ) )
elif "\n".join(lowercase_ ) != content:
return True
def a__ ( snake_case__ = False ) -> List[Any]:
lowerCamelCase = [os.path.join(lowercase_ , lowercase_ ) for f in os.listdir(lowercase_ ) if f.endswith(""".py""" )]
lowerCamelCase = [sort_auto_mapping(lowercase_ , overwrite=lowercase_ ) for fname in fnames]
if not overwrite and any(lowercase_ ):
lowerCamelCase = [f for f, d in zip(lowercase_ , lowercase_ ) if d]
raise ValueError(
F'The following files have auto mappings that need sorting: {", ".join(lowercase_ )}. Run `make style` to fix'
""" this.""" )
if __name__ == "__main__":
lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
lowerCAmelCase : Union[str, Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
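# Typical invocations (assumed usage, consistent with the `make style` hint in
# the error message above):
#   python sort_auto_mappings.py               # rewrite the files in place
#   python sort_auto_mappings.py --check_only  # only report unsorted mappings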
| 291 |
def UpperCamelCase (lowercase_: int = 10 ) -> str:
    if not isinstance(lowercase_ , int ) or lowercase_ < 0:
        raise ValueError("""Invalid input""" )
    modulus = 10**lowercase_
    number = 28433 * (pow(2 , 7830457 , modulus )) + 1
    return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
    print(f'''{UpperCamelCase(10) = }''')
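# A minimal sketch of why the three-argument pow above stays fast for such a
# huge exponent (this helper is an illustration, not part of the original
# solution): square-and-multiply keeps every intermediate below the modulus.
def _modexp_sketch(base: int, exponent: int, modulus: int) -> int:
    result = 1
    base %= modulus
    while exponent:
        if exponent & 1:  # fold in the current square when this bit is set
            result = result * base % modulus
        base = base * base % modulus
        exponent >>= 1
    return result


assert _modexp_sketch(2, 7830457, 10**10) == pow(2, 7830457, 10**10)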
| 192 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase : Any = {
"""configuration_deberta""": ["""DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DebertaConfig""", """DebertaOnnxConfig"""],
"""tokenization_deberta""": ["""DebertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[Any] = ["""DebertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Any = [
"""DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DebertaForMaskedLM""",
"""DebertaForQuestionAnswering""",
"""DebertaForSequenceClassification""",
"""DebertaForTokenClassification""",
"""DebertaModel""",
"""DebertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Union[str, Any] = [
"""TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDebertaForMaskedLM""",
"""TFDebertaForQuestionAnswering""",
"""TFDebertaForSequenceClassification""",
"""TFDebertaForTokenClassification""",
"""TFDebertaModel""",
"""TFDebertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
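# Minimal, self-contained sketch of the lazy-import idea used above (an
# illustration of the pattern only, not transformers' actual _LazyModule
# implementation): attribute access triggers the real submodule import.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each public name to the submodule that defines it
        self._origin = {v: k for k, vs in import_structure.items() for v in vs}

    def __getattr__(self, attr: str):
        # resolved only on first access, so `import package` stays cheap
        submodule = importlib.import_module(f"{self.__name__}.{self._origin[attr]}")
        return getattr(submodule, attr)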
| 345 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
UpperCamelCase : Optional[int] = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
UpperCamelCase : Optional[Any] = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
UpperCamelCase : str = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
    featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large'. Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple="auto" , UpperCAmelCase_ : Any=-1 , UpperCAmelCase_ : Optional[int]=0.9 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : int=5_0_0 , UpperCAmelCase_ : int="gpt2-large" , UpperCAmelCase_ : Tuple=-1 , UpperCAmelCase_ : Dict=1_0_2_4 , UpperCAmelCase_ : List[str]=2_5 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : str=2_5 , ):
"""simple docstring"""
a : List[str] = compute_mauve(
p_text=UpperCAmelCase_ , q_text=UpperCAmelCase_ , p_features=UpperCAmelCase_ , q_features=UpperCAmelCase_ , p_tokens=UpperCAmelCase_ , q_tokens=UpperCAmelCase_ , num_buckets=UpperCAmelCase_ , pca_max_data=UpperCAmelCase_ , kmeans_explained_var=UpperCAmelCase_ , kmeans_num_redo=UpperCAmelCase_ , kmeans_max_iter=UpperCAmelCase_ , featurize_model_name=UpperCAmelCase_ , device_id=UpperCAmelCase_ , max_text_length=UpperCAmelCase_ , divergence_curve_discretization_size=UpperCAmelCase_ , mauve_scaling_factor=UpperCAmelCase_ , verbose=UpperCAmelCase_ , seed=UpperCAmelCase_ , )
return out
| 345 | 1 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
lowerCAmelCase :str = getLogger(__name__)
lowerCAmelCase :Dict = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : int = 8 , lowerCAmelCase : str = DEFAULT_DEVICE , lowerCAmelCase : Optional[Any]=False , lowerCAmelCase : Optional[int]="summarization" , lowerCAmelCase : Dict=None , **lowerCAmelCase : List[str] , ):
"""simple docstring"""
__magic_name__ : Dict = Path(_UpperCAmelCase ).open('w' , encoding='utf-8' )
__magic_name__ : Optional[int] = str(_UpperCAmelCase )
__magic_name__ : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
if fpaa:
__magic_name__ : Dict = model.half()
__magic_name__ : Tuple = AutoTokenizer.from_pretrained(_UpperCAmelCase )
logger.info(f'Inferred tokenizer type: {tokenizer.__class__}' ) # if this is wrong, check config.model_type.
__magic_name__ : Dict = time.time()
# update config with task specific params
use_task_specific_params(_UpperCAmelCase , _UpperCAmelCase )
if prefix is None:
__magic_name__ : List[Any] = prefix or getattr(model.config , 'prefix' , '' ) or ''''''
for examples_chunk in tqdm(list(chunks(_UpperCAmelCase , _UpperCAmelCase ) ) ):
__magic_name__ : Any = [prefix + text for text in examples_chunk]
__magic_name__ : Union[str, Any] = tokenizer(_UpperCAmelCase , return_tensors='pt' , truncation=_UpperCAmelCase , padding='longest' ).to(_UpperCAmelCase )
__magic_name__ : Dict = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **_UpperCAmelCase , )
__magic_name__ : List[Any] = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
for hypothesis in dec:
fout.write(hypothesis + '\n' )
fout.flush()
fout.close()
__magic_name__ : Dict = int(time.time() - start_time ) # seconds
__magic_name__ : Union[str, Any] = len(_UpperCAmelCase )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def lowerCamelCase ( ):
"""simple docstring"""
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )
def lowerCamelCase ( lowerCAmelCase : Any=True ):
"""simple docstring"""
__magic_name__ : Dict = argparse.ArgumentParser()
parser.add_argument('model_name' , type=_UpperCAmelCase , help='like facebook/bart-large-cnn,t5-base, etc.' )
parser.add_argument('input_path' , type=_UpperCAmelCase , help='like cnn_dm/test.source' )
parser.add_argument('save_path' , type=_UpperCAmelCase , help='where to save summaries' )
parser.add_argument('--reference_path' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='like cnn_dm/test.target' )
parser.add_argument('--score_path' , type=_UpperCAmelCase , required=_UpperCAmelCase , default='metrics.json' , help='where to save metrics' )
parser.add_argument('--device' , type=_UpperCAmelCase , required=_UpperCAmelCase , default=_UpperCAmelCase , help='cuda, cuda:1, cpu etc.' )
parser.add_argument(
        '--prefix' , type=_UpperCAmelCase , required=_UpperCAmelCase , default=_UpperCAmelCase , help='will be added to the beginning of src examples' )
parser.add_argument('--task' , type=_UpperCAmelCase , default='summarization' , help='used for task_specific_params + metrics' )
parser.add_argument('--bs' , type=_UpperCAmelCase , default=8 , required=_UpperCAmelCase , help='batch size' )
parser.add_argument(
'--n_obs' , type=_UpperCAmelCase , default=-1 , required=_UpperCAmelCase , help='How many observations. Defaults to all.' )
parser.add_argument('--fp16' , action='store_true' )
parser.add_argument('--dump-args' , action='store_true' , help='print the custom hparams with the results' )
parser.add_argument(
'--info' , nargs='?' , type=_UpperCAmelCase , const=datetime_now() , help=(
'use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'
' lang=en-ru. If no value is passed, the current datetime string will be used.'
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    __magic_name__ , __magic_name__ : Dict = parser.parse_known_args()
__magic_name__ : Optional[Any] = parse_numeric_n_bool_cl_kwargs(_UpperCAmelCase )
if parsed_args and verbose:
print(f'parsed the following generate kwargs: {parsed_args}' )
__magic_name__ : Optional[int] = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__magic_name__ : Optional[int] = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=_UpperCAmelCase )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f'score_path {args.score_path} will be overwritten unless you type ctrl-c.' )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('Can\'t mix --fp16 and --device cpu' )
__magic_name__ : int = generate_summaries_or_translations(
_UpperCAmelCase , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **_UpperCAmelCase , )
if args.reference_path is None:
return {}
# Compute scores
__magic_name__ : str = calculate_bleu if '''translation''' in args.task else calculate_rouge
__magic_name__ : Tuple = [x.rstrip() for x in open(args.save_path ).readlines()]
__magic_name__ : int = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_UpperCAmelCase )]
__magic_name__ : dict = score_fn(_UpperCAmelCase , _UpperCAmelCase )
scores.update(_UpperCAmelCase )
if args.dump_args:
scores.update(_UpperCAmelCase )
if args.info:
__magic_name__ : Dict = args.info
if verbose:
print(_UpperCAmelCase )
if args.score_path is not None:
json.dump(_UpperCAmelCase , open(args.score_path , 'w' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
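    # Hypothetical summarization invocation (illustrative paths, not from the source):
    # python run_eval.py facebook/bart-large-cnn cnn_dm/test.source preds.txt \
    #     --reference_path cnn_dm/test.target --score_path rouge.json --bs 16 --fp16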
    run_generate(verbose=True)
| 331 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
__A =getLogger(__name__)
def a ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : int = 8 , _UpperCAmelCase : int = 10_24 , _UpperCAmelCase : Any="val" , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Any=False , _UpperCAmelCase : Union[str, Any]="summarization" , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Union[str, Any]=1 , _UpperCAmelCase : Dict = None , _UpperCAmelCase : Dict="" , **_UpperCAmelCase : List[str] , ):
'''simple docstring'''
__UpperCAmelCase : Any = str(_UpperCAmelCase )
assert local_rank is not None
torch.distributed.init_process_group(backend='''nccl''' , rank=_UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = Path(_UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = save_dir.joinpath(f'rank_{local_rank}_output.json' )
torch.cuda.set_device(_UpperCAmelCase )
__UpperCAmelCase : int = AutoModelForSeqaSeqLM.from_pretrained(_UpperCAmelCase ).cuda()
if fpaa:
__UpperCAmelCase : Any = model.half()
# determine if we need to increase num_beams
use_task_specific_params(_UpperCAmelCase , _UpperCAmelCase ) # update config with task specific params
__UpperCAmelCase : List[str] = generate_kwargs.pop('''num_beams''' , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
__UpperCAmelCase : Any = num_return_sequences
__UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(_UpperCAmelCase )
logger.info(f'Inferred tokenizer type: {tokenizer.__class__}' ) # if this is wrong, check config.model_type.
if max_source_length is None:
__UpperCAmelCase : Optional[Any] = tokenizer.model_max_length
if prefix is None:
__UpperCAmelCase : str = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
__UpperCAmelCase : Union[str, Any] = SeqaSeqDataset(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , max_target_length=10_24 , type_path=_UpperCAmelCase , n_obs=_UpperCAmelCase , prefix=_UpperCAmelCase , **_UpperCAmelCase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
__UpperCAmelCase : str = ds.make_sortish_sampler(_UpperCAmelCase , distributed=_UpperCAmelCase , add_extra_examples=_UpperCAmelCase , shuffle=_UpperCAmelCase )
__UpperCAmelCase : List[Any] = DataLoader(_UpperCAmelCase , sampler=_UpperCAmelCase , batch_size=_UpperCAmelCase , collate_fn=ds.collate_fn )
__UpperCAmelCase : List[Any] = []
for batch in tqdm(_UpperCAmelCase ):
__UpperCAmelCase : str = model.generate(
input_ids=batch['''input_ids'''].to(model.device ) , attention_mask=batch['''attention_mask'''].to(model.device ) , num_return_sequences=_UpperCAmelCase , num_beams=_UpperCAmelCase , **_UpperCAmelCase , )
__UpperCAmelCase : Optional[int] = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
__UpperCAmelCase : List[str] = batch['''ids''']
if num_return_sequences > 1:
__UpperCAmelCase : Any = chunks(_UpperCAmelCase , _UpperCAmelCase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(_UpperCAmelCase ):
results.append({'''pred''': pred, '''id''': ids[i].item()} )
save_json(_UpperCAmelCase , _UpperCAmelCase )
return results, sampler.num_replicas
def a ( ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser(
epilog='''Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate''' )
parser.add_argument('''--data_dir''' , type=_UpperCAmelCase , help='''like cnn_dm/test.source''' )
parser.add_argument(
'''--model_name''' , type=_UpperCAmelCase , help='''like facebook/bart-large-cnn,t5-base, etc.''' , default='''sshleifer/distilbart-xsum-12-3''' , )
parser.add_argument('''--save_dir''' , type=_UpperCAmelCase , help='''where to save''' , default='''tmp_gen''' )
parser.add_argument('''--max_source_length''' , type=_UpperCAmelCase , default=_UpperCAmelCase )
parser.add_argument(
        '''--type_path''' , type=_UpperCAmelCase , default='''test''' , help='''which subset to evaluate, typically train/val/test''' )
parser.add_argument('''--task''' , type=_UpperCAmelCase , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=_UpperCAmelCase , default=8 , required=_UpperCAmelCase , help='''batch size''' )
parser.add_argument(
'''--local_rank''' , type=_UpperCAmelCase , default=-1 , required=_UpperCAmelCase , help='''should be passed by distributed.launch''' )
parser.add_argument(
'''--n_obs''' , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase , help='''How many observations. Defaults to all.''' )
parser.add_argument(
'''--num_return_sequences''' , type=_UpperCAmelCase , default=1 , required=_UpperCAmelCase , help='''How many sequences to return''' )
parser.add_argument(
'''--sync_timeout''' , type=_UpperCAmelCase , default=6_00 , required=_UpperCAmelCase , help='''How long should master process wait for other processes to finish.''' , )
parser.add_argument('''--src_lang''' , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase )
parser.add_argument('''--tgt_lang''' , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase )
parser.add_argument(
        '''--prefix''' , type=_UpperCAmelCase , required=_UpperCAmelCase , default=_UpperCAmelCase , help='''will be added to the beginning of src examples''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--debug''' , action='''store_true''' )
__UpperCAmelCase : Any = time.time()
__UpperCAmelCase , __UpperCAmelCase : Any = parser.parse_known_args()
__UpperCAmelCase : List[Any] = parse_numeric_n_bool_cl_kwargs(_UpperCAmelCase )
if generate_kwargs and args.local_rank <= 0:
print(f'parsed the following generate kwargs: {generate_kwargs}' )
__UpperCAmelCase : Union[str, Any] = Path(args.save_dir + '''_tmp''' )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase ) # this handles locking.
__UpperCAmelCase : int = list(json_save_dir.glob('''rank_*.json''' ) )
if intermediate_files:
raise ValueError(f'Found files at {json_save_dir} please move or remove them.' )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
__UpperCAmelCase : List[Any] = {}
if args.src_lang is not None:
__UpperCAmelCase : List[str] = args.src_lang
if args.tgt_lang is not None:
__UpperCAmelCase : List[Any] = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=_UpperCAmelCase )
__UpperCAmelCase , __UpperCAmelCase : int = eval_data_dir(
args.data_dir , _UpperCAmelCase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=_UpperCAmelCase , **_UpperCAmelCase , )
if args.local_rank <= 0:
__UpperCAmelCase : int = Path(args.save_dir )
save_dir.mkdir(exist_ok=_UpperCAmelCase )
__UpperCAmelCase : List[str] = gather_results_from_each_node(_UpperCAmelCase , _UpperCAmelCase , args.sync_timeout )
__UpperCAmelCase : List[Any] = combine_partial_results(_UpperCAmelCase )
if args.num_return_sequences > 1:
__UpperCAmelCase : int = save_dir.joinpath('''pseudolabel_results.json''' )
print(f'Saving aggregated results at {save_path}, intermediate in {json_save_dir}/' )
save_json(_UpperCAmelCase , _UpperCAmelCase )
return
__UpperCAmelCase : str = Path(args.data_dir ).joinpath(args.type_path + '''.target''' )
with open(_UpperCAmelCase ) as f:
__UpperCAmelCase : int = [x.rstrip() for x in f.readlines()][: len(_UpperCAmelCase )]
# Calculate metrics, save metrics, and save _generations.txt
__UpperCAmelCase : Optional[Any] = '''translation''' in args.task
__UpperCAmelCase : Optional[int] = calculate_bleu if calc_bleu else calculate_rouge
__UpperCAmelCase : Union[str, Any] = '''bleu''' if calc_bleu else '''rouge'''
__UpperCAmelCase : Dict = score_fn(_UpperCAmelCase , _UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = len(_UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = time.time() - start_time
__UpperCAmelCase : List[str] = round(runtime / metrics['''n_obs'''] , 4 )
__UpperCAmelCase : List[str] = num_replicas
# TODO(@stas00): add whatever metadata to metrics
__UpperCAmelCase : List[Any] = save_dir.joinpath(f'{args.type_path}_{metric_name}.json' )
save_json(_UpperCAmelCase , _UpperCAmelCase , indent=_UpperCAmelCase )
print(_UpperCAmelCase )
write_txt_file(_UpperCAmelCase , save_dir.joinpath(f'{args.type_path}_generations.txt' ) )
if args.debug:
write_txt_file(_UpperCAmelCase , save_dir.joinpath(f'{args.type_path}.target' ) )
else:
shutil.rmtree(_UpperCAmelCase )
def a ( _UpperCAmelCase : int ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = []
for partial_result in partial_results:
records.extend(_UpperCAmelCase )
    __UpperCAmelCase : List[str] = sorted(_UpperCAmelCase , key=lambda x: x["id"] )
__UpperCAmelCase : Union[str, Any] = [x['''pred'''] for x in records]
return preds
def a ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = time.time()
logger.info('''waiting for all nodes to finish''' )
__UpperCAmelCase : Any = None
while (time.time() - start_wait) < timeout:
__UpperCAmelCase : List[Any] = list(save_dir.glob('''rank_*.json''' ) )
if len(_UpperCAmelCase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
__UpperCAmelCase : Union[str, Any] = lmap(_UpperCAmelCase , _UpperCAmelCase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError('''Rank 0 gave up on waiting for other processes''' )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
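    # Hypothetical invocation via torch.distributed.launch (illustrative values,
    # not from the source):
    # python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
    #     --model_name facebook/wmt19-en-de --data_dir wmt_en_de --save_dir tmp_gen \
    #     --task translation --fp16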
run_generate()
| 226 | 0 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Any = pipeline(
task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" )
UpperCamelCase_: str = load_dataset("""ashraq/esc50""" )
UpperCamelCase_: Optional[int] = dataset["""train"""]["""audio"""][-1]["""array"""]
UpperCamelCase_: Optional[int] = audio_classifier(snake_case_ , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(snake_case_ ) , [{"""score""": 0.501, """label""": """Sound of a dog"""}, {"""score""": 0.499, """label""": """Sound of vaccum cleaner"""}] , )
@unittest.skip("""No models are available in TF""" )
def lowerCAmelCase__ ( self : Optional[Any] ):
pass
@slow
@require_torch
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: List[str] = pipeline(
task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , )
# This is an audio of a dog
UpperCamelCase_: Tuple = load_dataset("""ashraq/esc50""" )
UpperCamelCase_: Optional[int] = dataset["""train"""]["""audio"""][-1]["""array"""]
UpperCamelCase_: int = audio_classifier(snake_case_ , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(snake_case_ ) , [
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
] , )
UpperCamelCase_: Any = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(snake_case_ ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
UpperCamelCase_: Any = audio_classifier(
[audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5 )
self.assertEqual(
nested_simplify(snake_case_ ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
@unittest.skip("""No models are available in TF""" )
def lowerCAmelCase__ ( self : Optional[Any] ):
pass
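# Sketch of the pipeline call exercised by the tests above (illustrative;
# `waveform` is a placeholder array, and running this downloads the checkpoint,
# so it is left commented out):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier(waveform, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])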
| 223 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : str = PegasusConfig
__UpperCamelCase : str = {}
__UpperCamelCase : Optional[Any] = """gelu"""
def __init__( self : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : str=13 , snake_case_ : Dict=7 , snake_case_ : List[Any]=True , snake_case_ : Optional[int]=False , snake_case_ : Any=99 , snake_case_ : Optional[Any]=32 , snake_case_ : Dict=2 , snake_case_ : Any=4 , snake_case_ : Optional[Any]=37 , snake_case_ : Dict=0.1 , snake_case_ : Optional[int]=0.1 , snake_case_ : List[str]=40 , snake_case_ : Tuple=2 , snake_case_ : Optional[int]=1 , snake_case_ : str=0 , ):
UpperCamelCase_: List[str] = parent
UpperCamelCase_: Optional[Any] = batch_size
UpperCamelCase_: Union[str, Any] = seq_length
UpperCamelCase_: Tuple = is_training
UpperCamelCase_: Tuple = use_labels
UpperCamelCase_: Tuple = vocab_size
UpperCamelCase_: Tuple = hidden_size
UpperCamelCase_: Optional[Any] = num_hidden_layers
UpperCamelCase_: List[Any] = num_attention_heads
UpperCamelCase_: Optional[int] = intermediate_size
UpperCamelCase_: Dict = hidden_dropout_prob
UpperCamelCase_: str = attention_probs_dropout_prob
UpperCamelCase_: Optional[int] = max_position_embeddings
UpperCamelCase_: Union[str, Any] = eos_token_id
UpperCamelCase_: Optional[int] = pad_token_id
UpperCamelCase_: List[Any] = bos_token_id
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCamelCase_: int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCamelCase_: List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCamelCase_: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_: Optional[int] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCamelCase_: List[str] = prepare_pegasus_inputs_dict(snake_case_ , snake_case_ , snake_case_ )
return config, inputs_dict
def lowerCAmelCase__ ( self : Any , snake_case_ : List[str] , snake_case_ : Dict ):
UpperCamelCase_: Any = TFPegasusModel(config=snake_case_ ).get_decoder()
UpperCamelCase_: Any = inputs_dict["""input_ids"""]
UpperCamelCase_: int = input_ids[:1, :]
UpperCamelCase_: List[str] = inputs_dict["""attention_mask"""][:1, :]
UpperCamelCase_: Tuple = inputs_dict["""head_mask"""]
UpperCamelCase_: int = 1
# first forward pass
UpperCamelCase_: Dict = model(snake_case_ , attention_mask=snake_case_ , head_mask=snake_case_ , use_cache=snake_case_ )
UpperCamelCase_, UpperCamelCase_: List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCamelCase_: Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase_: Union[str, Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
UpperCamelCase_: Union[str, Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCamelCase_: Optional[int] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCamelCase_: List[Any] = model(snake_case_ , attention_mask=snake_case_ )[0]
UpperCamelCase_: Dict = model(snake_case_ , attention_mask=snake_case_ , past_key_values=snake_case_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCamelCase_: str = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCamelCase_: str = output_from_no_past[:, -3:, random_slice_idx]
UpperCamelCase_: int = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case_ , snake_case_ , rtol=1e-3 )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ) -> Optional[int]:
if attention_mask is None:
UpperCamelCase_: Union[str, Any] = tf.cast(tf.math.not_equal(lowerCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCamelCase_: str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCamelCase_: Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCamelCase_: Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCamelCase_: str = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _UpperCamelCase ( _A , _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
__UpperCamelCase : str = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
__UpperCamelCase : int = (
{
"""conversational""": TFPegasusForConditionalGeneration,
"""feature-extraction""": TFPegasusModel,
"""summarization""": TFPegasusForConditionalGeneration,
"""text2text-generation""": TFPegasusForConditionalGeneration,
"""translation""": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
__UpperCamelCase : Optional[Any] = True
__UpperCamelCase : Any = False
__UpperCamelCase : Dict = False
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Tuple = TFPegasusModelTester(self )
UpperCamelCase_: List[Any] = ConfigTester(self , config_class=snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
__UpperCamelCase : Optional[int] = [
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
__UpperCamelCase : Union[str, Any] = """google/pegasus-xsum"""
@cached_property
def lowerCAmelCase__ ( self : Dict ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def lowerCAmelCase__ ( self : Union[str, Any] , **snake_case_ : Optional[int] ):
UpperCamelCase_: str = self.translate_src_text(**snake_case_ )
assert self.expected_text == generated_words
def lowerCAmelCase__ ( self : Optional[Any] , **snake_case_ : int ):
UpperCamelCase_: Tuple = self.tokenizer(self.src_text , **snake_case_ , padding=snake_case_ , return_tensors="""tf""" )
UpperCamelCase_: Tuple = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=snake_case_ , )
UpperCamelCase_: Tuple = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case_ )
return generated_words
@slow
def lowerCAmelCase__ ( self : Optional[Any] ):
self._assert_generated_batch_equal_expected()
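# The @slow integration test above is skipped by default; by the usual
# transformers testing convention it is opted into via the RUN_SLOW
# environment variable, e.g.:
#   RUN_SLOW=1 python -m pytest <path to this test file>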
| 223 | 1 |
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
__a: str = False
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase( self , __lowerCAmelCase=32 ) -> List[Any]:
set_seed(0 )
lowercase__ : Union[str, Any] = UNetaDModel(sample_size=__lowerCAmelCase , in_channels=3 , out_channels=3 )
lowercase__ : Dict = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1 )
return model, optimizer
@slow
def _lowerCAmelCase( self ) -> Union[str, Any]:
lowercase__ : List[str] = '''cpu''' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
lowercase__ : Union[str, Any] = DDPMScheduler(
num_train_timesteps=1000 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='''linear''' , clip_sample=__lowerCAmelCase , )
lowercase__ : Optional[int] = DDIMScheduler(
num_train_timesteps=1000 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='''linear''' , clip_sample=__lowerCAmelCase , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
lowercase__ : int = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(__lowerCAmelCase ) for _ in range(4 )]
lowercase__ : str = [torch.randn((4, 3, 32, 32) ).to(__lowerCAmelCase ) for _ in range(4 )]
lowercase__ : Any = [torch.randint(0 , 1000 , (4,) ).long().to(__lowerCAmelCase ) for _ in range(4 )]
# train with a DDPM scheduler
lowercase__ , lowercase__ : Optional[int] = self.get_model_optimizer(resolution=32 )
model.train().to(__lowerCAmelCase )
for i in range(4 ):
optimizer.zero_grad()
lowercase__ : List[Any] = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
lowercase__ : int = model(__lowerCAmelCase , timesteps[i] ).sample
lowercase__ : Optional[Any] = torch.nn.functional.mse_loss(__lowerCAmelCase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
lowercase__ , lowercase__ : int = self.get_model_optimizer(resolution=32 )
model.train().to(__lowerCAmelCase )
for i in range(4 ):
optimizer.zero_grad()
lowercase__ : Union[str, Any] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
lowercase__ : List[Any] = model(__lowerCAmelCase , timesteps[i] ).sample
lowercase__ : List[str] = torch.nn.functional.mse_loss(__lowerCAmelCase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-5 ) )
self.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-5 ) )
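# Background note (the standard DDPM forward-noising formula, stated as an
# assumption about scheduler.add_noise rather than a quote of its internals):
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise
# Both schedulers share this forward process (hence the num_train_timesteps
# assertion above), which is what makes the two training runs comparable.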
| 198 |
'''simple docstring'''
def __UpperCamelCase ( numerator: int = 1 , digit: int = 1000 ) -> int:
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1 ):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
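A quick worked example: among denominators up to 10, 1/7 = 0.(142857) has the longest recurring decimal cycle (length 6), so the remainder-tracking search returns 7.

# Worked example (sketch): the longest recurring cycle for d <= 10 is at d = 7.
assert solution(1, 10) == 7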
| 198 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__A = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BICUBIC , do_center_crop: bool = True , crop_size: Dict[str, int] = None , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , do_convert_rgb: bool = True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name='crop_size' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image , size=size['shortest_edge'] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> Any:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: int = None , do_rescale: bool = None , rescale_factor: float = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , do_convert_rgb: bool = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Optional[ChannelDimension] = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name='size' , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
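A usage sketch for the processor above (the input path is illustrative): preprocess applies resize, center-crop, rescale and normalize in that order and returns a batched pixel_values tensor.

# Sketch: default settings produce a (1, 3, 224, 224) tensor.
from PIL import Image
processor = CLIPImageProcessor()
batch = processor(images=Image.open("cat.png"), return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])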
| 75 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput( BaseOutput ):
    """simple docstring"""
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler( SchedulerMixin , ConfigMixin ):
    """simple docstring"""
    order = 2
    @register_to_config
    def __init__( self , sigma_min: float = 0.02 , sigma_max: float = 100 , s_noise: float = 1.007 , s_churn: float = 80 , s_min: float = 0.05 , s_max: float = 50 , ) -> Optional[int]:
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)
    def scale_model_input( self , sample: torch.FloatTensor , timestep: Optional[int] = None ) -> torch.FloatTensor:
        return sample
    def set_timesteps( self , num_inference_steps: int , device: Union[str, torch.device] = None ) -> List[Any]:
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0 , self.num_inference_steps )[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule , dtype=torch.float32 , device=device )
    def add_noise_to_input( self , sample: torch.FloatTensor , sigma: float , generator: Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape , generator=generator ).to(sample.device )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step( self , model_output: torch.FloatTensor , sigma_hat: float , sigma_prev: float , sample_hat: torch.FloatTensor , return_dict: bool = True , ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )
    def step_correct( self , model_output: torch.FloatTensor , sigma_hat: float , sigma_prev: float , sample_hat: torch.FloatTensor , sample_prev: torch.FloatTensor , derivative: torch.FloatTensor , return_dict: bool = True , ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )
    def add_noise( self , original_samples: List[Any] , noise: int , timesteps: int ) -> int:
        raise NotImplementedError()
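The three hooks map onto Algorithm 2 of Karras et al. (2022): add_noise_to_input applies the stochastic churn, step takes the Euler step, and step_correct applies the second-order correction. A condensed sampling loop, assuming a `model` and `num_inference_steps` are in scope and mirroring the calling convention of diffusers' KarrasVePipeline:

# Condensed sampling loop (sketch; `model` and `scheduler` assumed in scope).
scheduler.set_timesteps(num_inference_steps)
sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    sigma = scheduler.schedule[t]
    sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
    # 1. churn noise into the sample
    sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
    # 2. Euler step
    model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
    step_output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
    # 3. second-order correction, skipped at the final sigma
    if sigma_prev != 0:
        model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
        step_output = scheduler.step_correct(
            model_output, sigma_hat, sigma_prev, sample_hat,
            step_output.prev_sample, step_output.derivative)
    sample = step_output.prev_sample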
| 75 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax( depth , node_index , is_max , scores , height ):
    """simple docstring"""
    if depth < 0:
        raise ValueError("""Depth cannot be less than 0""" )
    if not scores:
        raise ValueError("""Scores cannot be empty""" )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
        if is_max
        else min(
            minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
    )
def main():
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print(f"""Optimal value : {minimax(0 , 0 , True , scores , height )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
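A worked example on a tiny tree: with leaves [3, 5, 2, 9] and height 2, the minimizing level yields min(3, 5) = 3 and min(2, 9) = 2, and the maximizing root picks max(3, 2) = 3.

# Worked example (sketch) for the minimax above.
assert minimax(0, 0, True, [3, 5, 2, 9], 2) == 3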
| 37 |
'''simple docstring'''
def find_minimum_change( denominations , value ):
    """simple docstring"""
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '0'
if (
input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
== "y"
):
        n = int(input('Enter the number of denominations you want to add: ').strip())
for i in range(0, n):
denominations.append(int(input(f"""Denomination {i}: """).strip()))
        value = input('Enter the change you want to make in Indian Currency: ').strip()
else:
# All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input('Enter the change you want to make: ').strip()
if int(value) == 0 or int(value) < 0:
print('The total value cannot be zero or negative.')
else:
print(f"""Following is minimal change for {value}: """)
        answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=' ')
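A worked example for the greedy routine above: the Indian denomination system is canonical, so taking the largest coin first is optimal here (greedy can fail on arbitrary coin sets).

# Worked example (sketch): 987 = 500 + 200 + 200 + 50 + 20 + 10 + 5 + 2.
assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987) == [500, 200, 200, 50, 20, 10, 5, 2]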
| 267 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_longformer_fast'] = ['LongformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_longformer'] = [
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_longformer'] = [
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
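The `_LazyModule` swap keeps importing the package cheap: submodules listed in `_import_structure` are only imported when an attribute is first touched. A minimal sketch of the same idea with plain module `__getattr__` (PEP 562), independent of the transformers helper (package and submodule names are illustrative):

# mypkg/__init__.py -- minimal lazy-import sketch
import importlib

_LAZY = {"LongformerModel": ".modeling_longformer"}

def __getattr__(name):
    # Resolve the submodule only on first attribute access.
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")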
| 287 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
    def test_canny( self ) -> Union[str, Any]:
        '''simple docstring'''
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-canny' , from_pt=True , dtype=jnp.bfloat16 )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params['controlnet'] = controlnet_params
        prompts = 'bird'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )
        canny_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples )
        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng , jax.device_count() )
        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )
        images = pipe(
            prompt_ids=prompt_ids , image=processed_image , params=p_params , prng_seed=rng , num_inference_steps=50 , jit=True , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
        print(f"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
    def test_pose( self ) -> Union[str, Any]:
        '''simple docstring'''
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-openpose' , from_pt=True , dtype=jnp.bfloat16 )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params['controlnet'] = controlnet_params
        prompts = 'Chef in the kitchen'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )
        pose_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples )
        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng , jax.device_count() )
        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )
        images = pipe(
            prompt_ids=prompt_ids , image=processed_image , params=p_params , prng_seed=rng , num_inference_steps=50 , jit=True , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
        print(f"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 287 | 1 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k: str ) -> str:
    """simple docstring"""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name , hf_name )
    return k
def convert_pegasus(tf_weights: dict , cfg_updates: dict ) -> PegasusForConditionalGeneration:
    """simple docstring"""
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
    # make sure embedding.padding_idx is respected
    mapping['shared.weight'][cfg.pad_token_id] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
    mapping['encoder.embed_tokens.weight'] = mapping['shared.weight']
    mapping['decoder.embed_tokens.weight'] = mapping['shared.weight']
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
    mapping.update(**empty_biases )
    missing, extra = torch_model.model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
    ]
    assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], F'''no matches found for the following tf keys {extra}'''
    return torch_model
def get_tf_weights_as_numpy(path: str = "./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
    """simple docstring"""
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['Adafactor', 'global_step']
    for name, shape in tqdm(init_vars , desc='converting tf checkpoint to dict' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str , save_dir: str ) -> Optional[Any]:
    """simple docstring"""
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[F'''summarization_{dataset}''']['max_position_embeddings']
    tok = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[F'''summarization_{dataset}''']
    if dataset == "large":
        cfg_updates['task_specific_params'] = task_specific_params
    torch_model = convert_pegasus(tf_weights , cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    sd.pop('model.decoder.embed_positions.weight' )
    sd.pop('model.encoder.embed_positions.weight' )
    torch.save(sd , Path(save_dir ) / 'pytorch_model.bin' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
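A usage sketch for the converter above (paths and the script filename are illustrative): point the positional arguments at a TF checkpoint and an output directory, and the script writes the tokenizer plus pytorch_model.bin.

# Usage sketch:
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc
# Omitting save_dir derives it from the checkpoint's parent directory name.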
| 53 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor( ProcessorMixin ):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__( self , image_processor , tokenizer ):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images: ImageInput = None , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_token_type_ids: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ):
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.' )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
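A usage sketch for the processor above (the image path is illustrative; the checkpoint is a public BLIP captioning model): passing both modalities merges pixel_values with the tokenized text into one BatchEncoding.

# Sketch: joint image+text inputs via the upstream transformers class.
from PIL import Image
from transformers import BlipProcessor
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
inputs = processor(images=Image.open("cat.png"), text="a photo of", return_tensors="pt")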
| 53 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class SwitchTransformersConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''switch_transformers'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
    def __init__( self , vocab_size=32128 , d_model=768 , d_kv=64 , d_ff=2048 , expert_capacity=64 , num_layers=12 , num_sparse_encoder_layers=3 , num_decoder_layers=12 , num_sparse_decoder_layers=3 , num_heads=12 , num_experts=8 , router_bias=False , router_jitter_noise=0.01 , router_dtype="float32" , router_ignore_padding_tokens=False , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , router_z_loss_coef=0.001 , router_aux_loss_coef=0.001 , initializer_factor=1.0 , feed_forward_proj="relu" , use_cache=True , add_router_probs=False , is_encoder_decoder=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ) -> List[Any]:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split("""-""" )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == """gated"""
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                F"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
                """'gated-gelu' or 'relu'""" )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = """gelu_new"""
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
| 306 |
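A usage sketch for the SwitchTransformersConfig defined above: the sparse-layer step is derived from the layer counts, so every encoder_sparse_step-th layer hosts a mixture-of-experts block.

# Sketch: 12 layers with 3 sparse encoder layers gives a step of 12 // 3 = 4.
cfg = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
assert cfg.encoder_sparse_step == 4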
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester :
'''simple docstring'''
    def __init__( self , parent , out_indices=None , stage_names=None , out_features=None , backbone="resnet50" , batch_size=3 , image_size=32 , num_channels=3 , use_pretrained_backbone=True , is_training=True , ) -> Any:
        """simple docstring"""
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs( self ) -> Tuple:
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values
    def get_config( self ) -> str:
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model( self , config , pixel_values ) -> Dict:
        """simple docstring"""
        model = TimmBackbone(config=config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            result = model(pixel_values )
        self.parent.assertEqual(
            result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
    def prepare_config_and_inputs_for_common( self ) -> int:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest( ModelTesterMixin , BackboneTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp( self ) -> Optional[Any]:
        """simple docstring"""
        self.model_tester = TimmBackboneModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TimmBackboneConfig , has_text_modality=False )
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence( self ) -> Tuple:
        """simple docstring"""
        timm_checkpoint = """resnet18"""
        transformers_checkpoint = """microsoft/resnet-18"""
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True , out_indices=[1, 2, 3] )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Optional[Any] = model_class(__a )
__lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : List[str] = [*signature.parameters.keys()]
__lowercase : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : Optional[Any] = True
__lowercase : Union[str, Any] = self.has_attentions
# no need to test all models as different heads yield the same functionality
__lowercase : Union[str, Any] = self.all_model_classes[0]
__lowercase : List[Any] = model_class(__a )
model.to(__a )
__lowercase : Optional[Any] = self._prepare_for_class(__a , __a )
__lowercase : Union[str, Any] = model(**__a )
__lowercase : Optional[int] = outputs[0][-1]
# Encoder-/Decoder-only models
__lowercase : Any = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__lowercase : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__a )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[str] = model_class(__a )
model.to(__a )
model.eval()
__lowercase : int = model(**__a )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__lowercase : Any = copy.deepcopy(__a )
__lowercase : Dict = None
__lowercase : Tuple = model_class(__a )
model.to(__a )
model.eval()
__lowercase : Optional[int] = model(**__a )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__lowercase : List[str] = copy.deepcopy(__a )
__lowercase : Optional[Any] = False
__lowercase : str = model_class(__a )
model.to(__a )
model.eval()
        __lowercase : List[Any] = model(**__a )
| 306 | 1 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class Node :
    def __init__( self , value = None ):
        """simple docstring"""
        self.value = value
        self.parent = None  # Added in order to delete a node easier
        self.left = None
        self.right = None
    def __repr__( self ):
        """simple docstring"""
        from pprint import pformat
        if self.left is None and self.right is None:
            return str(self.value )
        return pformat({f'''{self.value}''': (self.left, self.right)} , indent=1 )
class BinarySearchTree :
    def __init__( self , root = None ):
        """simple docstring"""
        self.root = root
def __str__( self : int ):
"""simple docstring"""
return str(self.root )
    def __reassign_nodes( self , node , new_children ):
        """simple docstring"""
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node ):  # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children
    def is_right( self , node ):
        """simple docstring"""
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False
    def empty( self ):
        """simple docstring"""
        return self.root is None
    def __insert( self , value ):
        """simple docstring"""
        new_node = Node(value )  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node
    def insert( self , *values ):
        """simple docstring"""
        for value in values:
            self.__insert(value )
    def search( self , value ):
        """simple docstring"""
        if self.empty():
            raise IndexError('Warning: Tree is empty! please use another.' )
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node
    def get_max( self , node = None ):
        """simple docstring"""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node
    def get_min( self , node = None ):
        """simple docstring"""
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node
    def remove( self , value ):
        """simple docstring"""
        node = self.search(value )  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node , None )
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node , node.right )
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node , node.left )
            else:
                tmp_node = self.get_max(
                    node.left )  # Gets the max value of the left branch
                self.remove(tmp_node.value )  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure
    def preorder_traverse( self , node ):
        """simple docstring"""
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left )
            yield from self.preorder_traverse(node.right )
    def traversal_tree( self , traversal_function=None ):
        """simple docstring"""
        if traversal_function is None:
            return self.preorder_traverse(self.root )
        else:
            return traversal_function(self.root )
    def inorder( self , arr , node ):
        """simple docstring"""
        if node:
            self.inorder(arr , node.left )
            arr.append(node.value )
            self.inorder(arr , node.right )
    def find_kth_smallest( self , k , node ):
        """simple docstring"""
        arr = []
        self.inorder(arr , node )  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder( curr_node ):
    '''simple docstring'''
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
    return node_list
def main():
    '''simple docstring'''
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i )
    # Prints all the elements of the list in order traversal
    print(t )
if t.search(6 ) is not None:
print('The value 6 exists' )
else:
print('The value 6 doesn\'t exist' )
if t.search(-1 ) is not None:
print('The value -1 exists' )
else:
print('The value -1 doesn\'t exist' )
if not t.empty():
print('Max Value: ', t.get_max().value ) # type: ignore
print('Min Value: ', t.get_min().value ) # type: ignore
for i in testlist:
        t.remove(i )
    print(t )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
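A usage sketch for the module-level postorder helper: children are visited before their parent, so the tree built from (8, 3, 6, 1, 10, 14, 13, 4, 7) yields [1, 4, 7, 6, 3, 13, 14, 10, 8].

# Sketch: postorder traversal of the demo tree.
t = BinarySearchTree()
t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7)
print([node.value for node in postorder(t.root)])  # [1, 4, 7, 6, 3, 13, 14, 10, 8]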
| 141 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components( self ):
        """simple docstring"""
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'original_image': original_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        """simple docstring"""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
    def test_save_load_optional_components( self ):
        """simple docstring"""
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
    def test_save_load_float16( self ):
        """simple docstring"""
        super().test_save_load_float16(expected_max_diff=1e-1 )
    def test_attention_slicing_forward_pass( self ):
        """simple docstring"""
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
    def test_save_load_local( self ):
        """simple docstring"""
        self._test_save_load_local()
    def test_inference_batch_single_identical( self ):
        """simple docstring"""
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
| 141 | 1 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotImageClassificationPipeline( Pipeline ):
def __init__( self, **lowercase_ ) -> List[str]:
super().__init__(**lowercase_ )
requires_backends(self, 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self, images, **kwargs ) -> Union[str, Any]:
        return super().__call__(images, **kwargs )
    def _sanitize_parameters( self, **kwargs ) -> Union[str, Any]:
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}
    def preprocess( self, image, candidate_labels=None, hypothesis_template="This is a photo of {}." ) -> Optional[int]:
        image = load_image(image )
        inputs = self.image_processor(images=[image], return_tensors=self.framework )
        inputs['candidate_labels'] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True )
        inputs['text_inputs'] = [text_inputs]
        return inputs
    def _forward( self, model_inputs ) -> Optional[int]:
        candidate_labels = model_inputs.pop('candidate_labels' )
        text_inputs = model_inputs.pop('text_inputs' )
        if isinstance(text_inputs[0], UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs )
        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self, model_outputs ) -> int:
        candidate_labels = model_outputs.pop('candidate_labels' )
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores, list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''' )
        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels ), key=lambda x : -x[0] )
        ]
        return result
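A usage sketch for the pipeline above (the image path is illustrative; the checkpoint is a public CLIP model): the task name "zero-shot-image-classification" resolves to this class through the pipeline factory.

# Sketch: scoring candidate labels against an image with CLIP.
from transformers import pipeline
clf = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = clf("cat.png", candidate_labels=["cat", "dog"])  # [{'score': ..., 'label': ...}, ...]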
| 332 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments :
    model_type: str = field(
        default=None , metadata={'help': 'Model type selected in the list: ' + ', '.join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
    max_seq_length: int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    doc_stride: int = field(
        default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
    max_query_length: int = field(
        default=64 , metadata={
            'help': (
                'The maximum number of tokens for the question. Questions longer than this will '
                'be truncated to this length.'
            )
        } , )
    max_answer_length: int = field(
        default=30 , metadata={
            'help': (
                'The maximum length of an answer that can be generated. This is needed because the start '
                'and end predictions are not conditioned on one another.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    version_2_with_negative: bool = field(
        default=False , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
    null_score_diff_threshold: float = field(
        default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    n_best_size: int = field(
        default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    lang_id: int = field(
        default=0 , metadata={
            'help': (
                'language id of input for language-specific xlm models (see'
                ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
            )
        } , )
    threads: int = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class Split( Enum ):
    train = '''train'''
    dev = '''dev'''
class SquadDataset( Dataset ):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__( self, args, tokenizer, limit_length = None, mode = Split.train, is_language_sensitive = False, cache_dir = None, dataset_format = "pt", ) -> int:
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name' )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = 'v2' if args.version_2_with_negative else 'v1'
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features['features']
                self.dataset = self.old_features.get('dataset', None )
                self.examples = self.old_features.get('examples', None )
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
' future run' )
else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, )
                start = time.time()
                torch.save(
                    {'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, cached_features_file, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ) -> Tuple:
return len(self.features )
    def __getitem__( self, i ) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float )
snake_case = {
'input_ids': input_ids,
'attention_mask': attention_mask,
'token_type_ids': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'is_impossible': is_impossible} )
if self.is_language_sensitive:
inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
snake_case = torch.tensor(feature.start_position, dtype=torch.long )
snake_case = torch.tensor(feature.end_position, dtype=torch.long )
inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
return inputs
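# Minimal usage sketch (illustrative; the `args` object is an assumption and must
# provide the fields referenced above, e.g. data_dir, max_seq_length, doc_stride):
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   dataset = SquadDataset(args, tokenizer, mode=Split.train)
#   batch = dataset[0]  # dict of input_ids / attention_mask / token_type_ids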
| 332 | 1 |
'''simple docstring'''
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a ``size`` x ``size`` window over a square matrix with the given
    stride and keep the maximum of each window."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
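# Worked example (illustrative): a 2x2 window with stride 2 keeps the maximum
# of each quadrant.
#   >>> maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], size=2, stride=2)
#   array([[ 6.,  8.],
#          [14., 16.]])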
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a ``size`` x ``size`` window over a square matrix with the given
    stride and keep the (int-truncated) average of each window."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
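# Worked example (illustrative): the same input averaged per window; the
# implementation truncates each average to int before storing it.
#   >>> avgpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], size=2, stride=2)
#   array([[ 3.,  5.],
#          [11., 13.]])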
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="""avgpooling""", verbose=True)
# Loading the image
image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 75 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __UpperCamelCase ( unittest.TestCase ):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16_000,
            "return_attention_mask": False,
            "do_normalize": True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")
        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)
    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        processor.save_pretrained(self.tmpdirname)
        processor = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname)
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, WavaVecaCTCTokenizer)
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, WavaVecaFeatureExtractor)
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)
    def test_save_load_pretrained_additional_features(self):
        processor = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)
        # make sure that error is thrown when decoder alphabet doesn't match
        processor = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )
        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)
    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            WavaVecaProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits(shape=(10, 16), seed=13)
        decoded_processor = processor.decode(logits)
        decoded_decoder = decoder.decode_beams(logits)[0]
        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ =self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
lowerCamelCase_ =processor.batch_decode(lowerCAmelCase )
else:
with get_context(lowerCAmelCase ).Pool() as pool:
lowerCamelCase_ =processor.batch_decode(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =list(lowerCAmelCase )
with get_context('''fork''' ).Pool() as p:
lowerCamelCase_ =decoder.decode_beams_batch(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =[], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(lowerCAmelCase, decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''], decoded_processor.text )
self.assertListEqual(lowerCAmelCase, decoded_processor.logit_score )
self.assertListEqual(lowerCAmelCase, decoded_processor.lm_score )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ =self._get_dummy_logits()
lowerCamelCase_ =15
lowerCamelCase_ =-2_0.0
lowerCamelCase_ =-4.0
lowerCamelCase_ =processor.batch_decode(
lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, )
lowerCamelCase_ =decoded_processor_out.text
lowerCamelCase_ =list(lowerCAmelCase )
with get_context('''fork''' ).Pool() as pool:
lowerCamelCase_ =decoder.decode_beams_batch(
lowerCAmelCase, lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, )
lowerCamelCase_ =[d[0][0] for d in decoded_decoder_out]
lowerCamelCase_ =[d[0][2] for d in decoded_decoder_out]
lowerCamelCase_ =[d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''], lowerCAmelCase )
self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7], lowerCAmelCase, atol=1e-3 ) )
self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4], lowerCAmelCase, atol=1e-3 ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ =self._get_dummy_logits()
lowerCamelCase_ =2.0
lowerCamelCase_ =5.0
lowerCamelCase_ =-2_0.0
lowerCamelCase_ =True
lowerCamelCase_ =processor.batch_decode(
lowerCAmelCase, alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, )
lowerCamelCase_ =decoded_processor_out.text
lowerCamelCase_ =list(lowerCAmelCase )
decoder.reset_params(
alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, )
with get_context('''fork''' ).Pool() as pool:
lowerCamelCase_ =decoder.decode_beams_batch(
lowerCAmelCase, lowerCAmelCase, )
lowerCamelCase_ =[d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''], lowerCAmelCase )
lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha, 2.0 )
self.assertEqual(lm_model.beta, 5.0 )
self.assertEqual(lm_model.unk_score_offset, -2_0.0 )
self.assertEqual(lm_model.score_boundary, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key]
lowerCamelCase_ =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
lowerCamelCase_ =os.listdir(lowerCAmelCase )
lowerCamelCase_ =['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =snapshot_download('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key]
lowerCamelCase_ =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
lowerCamelCase_ =os.listdir(lowerCAmelCase )
lowerCamelCase_ =os.listdir(lowerCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =floats_list((3, 1_000) )
lowerCamelCase_ =processor_wavaveca(lowerCAmelCase, return_tensors='''np''' )
lowerCamelCase_ =processor_auto(lowerCAmelCase, return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2 )
lowerCamelCase_ =self._get_dummy_logits()
lowerCamelCase_ =processor_wavaveca.batch_decode(lowerCAmelCase )
lowerCamelCase_ =processor_auto.batch_decode(lowerCAmelCase )
self.assertListEqual(decoded_wavaveca.text, decoded_auto.text )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
self.assertListEqual(
processor.model_input_names, feature_extractor.model_input_names, msg='''`processor` and `feature_extractor` model input names do not match''', )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =self._get_dummy_logits()[0]
lowerCamelCase_ =processor.decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ) ), outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''start_offset''' ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''end_offset''' ), [1, 3, 5] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =self._get_dummy_logits()
lowerCamelCase_ =processor.batch_decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ) for o in outputs['''word_offsets''']], outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''start_offset''' ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''end_offset''' ), [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowercase__ ( self ):
"""simple docstring"""
import torch
lowerCamelCase_ =load_dataset('''common_voice''', '''en''', split='''train''', streaming=lowerCAmelCase )
lowerCamelCase_ =ds.cast_column('''audio''', datasets.Audio(sampling_rate=16_000 ) )
lowerCamelCase_ =iter(lowerCAmelCase )
lowerCamelCase_ =next(lowerCAmelCase )
lowerCamelCase_ =AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
lowerCamelCase_ =WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
lowerCamelCase_ =processor(sample['''audio''']['''array'''], return_tensors='''pt''' ).input_values
with torch.no_grad():
lowerCamelCase_ =model(lowerCAmelCase ).logits.cpu().numpy()
lowerCamelCase_ =processor.decode(logits[0], output_word_offsets=lowerCAmelCase )
lowerCamelCase_ =model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
lowerCamelCase_ =[
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
lowerCamelCase_ ='''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), lowerCAmelCase )
self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), output.text )
# output times
lowerCamelCase_ =torch.tensor(self.get_from_offsets(lowerCAmelCase, '''start_time''' ) )
lowerCamelCase_ =torch.tensor(self.get_from_offsets(lowerCAmelCase, '''end_time''' ) )
# fmt: off
lowerCamelCase_ =torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
lowerCamelCase_ =torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) )
self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) )
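# To run this suite locally (illustrative invocation; the exact test path in
# your checkout is an assumption):
#   python -m pytest -k wav2vec2_with_lm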
| 75 | 1 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for the Spark dataset builder."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df, partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator):
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id, num_workers):
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df, cache_dir=None, working_dir=None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath, file_format, max_shard_size):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(self, split_generator, file_format="arrow", max_shard_size=None, num_proc=None, **kwargs):
        self._validate_cache_dir()
        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join
        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)
        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id, shard_id, global_shard_id):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )
    def _get_examples_iterable_for_split(self, split_generator):
        return SparkExamplesIterable(self.df)
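# Minimal usage sketch (illustrative): this builder is normally reached through
# `datasets.Dataset.from_spark`, which wraps the `Spark` builder above.
#
#   import pyspark
#   from datasets import Dataset
#
#   spark = pyspark.sql.SparkSession.builder.master("local[*]").getOrCreate()
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = Dataset.from_spark(df)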
| 304 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
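# Note: replacing this module in `sys.modules` with a `_LazyModule` keeps
# `import transformers.models.mra` cheap; the torch-backed classes listed in
# `_import_structure` are only imported on first attribute access.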
| 304 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCamelCase_ = logging.get_logger(__name__)
class LayoutLMvaFeatureExtractor(LayoutLMvaImageProcessor):
    """Deprecated alias kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 79 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
lowerCamelCase_ = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
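# Worked example (illustrative): a uniform distribution over four outcomes has
# entropy ln(4) ≈ 1.386 nats.
#   >>> entropy(torch.tensor([0.25, 0.25, 0.25, 0.25]))
#   tensor(1.3863)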
def print_ad_tensor(tensor):
    """Log a 2D tensor, one row per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores
    (importance scores as in Michel et al., http://arxiv.org/abs/1905.10650)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)
    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively mask the least important heads until the score drops below
    `masking_threshold` times the original score."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)
    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]
        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)
        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )
    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the masked heads and measure the effect on score and speed."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
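# Example invocation (illustrative; the script filename is an assumption):
#   python prune_gpt2.py --data_dir ./tokens.txt --model_name_or_path gpt2 \
#       --output_dir ./pruned --try_masking --masking_threshold 0.9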
| 79 | 1 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Check primality by trial division over 6k ± 1 candidates."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
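# Quick sanity checks (illustrative):
#   >>> is_prime(13)
#   True
#   >>> is_prime(21)
#   False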
def solution(ratio: float = 0.1) -> int:
    """Return the side length of the square spiral at which the ratio of primes
    along both diagonals first falls below ``ratio``."""
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        # check the corners of the next ring (the bottom-right corner is a
        # perfect square and never prime, so the range skips it)
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 251 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
lowerCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1
        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lengths = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lengths)
    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lengths)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
])
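# Minimal usage sketch (illustrative; the jsonl path and tokenizer choice are
# assumptions):
#
#   from torch.utils.data import DataLoader
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(),
#                          get_mmimdb_labels(), max_seq_length=128)
#   loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)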
| 251 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    """Compute the resonant frequency of an LC circuit: f = 1 / (2*pi*sqrt(L*C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
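# Worked example (illustrative): L = 10 H and C = 5 F give
# f = 1 / (2*pi*sqrt(50)) ≈ 0.0225 Hz.
#   >>> resonant_frequency(10, 5)
#   ('Resonant frequency', 0.0225...)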
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 163 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 163 | 1 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__lowerCamelCase = logging.get_logger(__name__)
class UpperCamelCase__( __A ):
lowerCAmelCase__ : List[str] = 'linear'
lowerCAmelCase__ : Optional[int] = 'cosine'
lowerCAmelCase__ : Optional[Any] = 'cosine_with_restarts'
lowerCAmelCase__ : int = 'polynomial'
lowerCAmelCase__ : Any = 'constant'
lowerCAmelCase__ : Dict = 'constant_with_warmup'
lowerCAmelCase__ : List[str] = 'piecewise_constant'
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ = -1 ):
"""simple docstring"""
return LambdaLR(UpperCamelCase__ , lambda UpperCamelCase__ : 1 , last_epoch=UpperCamelCase__ )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = -1 ):
"""simple docstring"""
def lr_lambda(UpperCamelCase__ ):
if current_step < num_warmup_steps:
return float(UpperCamelCase__ ) / float(max(1.0 , UpperCamelCase__ ) )
return 1.0
return LambdaLR(UpperCamelCase__ , UpperCamelCase__ , last_epoch=UpperCamelCase__ )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = -1 ):
"""simple docstring"""
A__ = {}
A__ = step_rules.split(',' )
for rule_str in rule_list[:-1]:
A__ , A__ = rule_str.split(':' )
A__ = int(UpperCamelCase__ )
A__ = float(UpperCamelCase__ )
A__ = value
A__ = float(rule_list[-1] )
def create_rules_function(UpperCamelCase__ , UpperCamelCase__ ):
def rule_func(UpperCamelCase__ ) -> float:
A__ = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(UpperCamelCase__ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
A__ = create_rules_function(UpperCamelCase__ , UpperCamelCase__ )
return LambdaLR(UpperCamelCase__ , UpperCamelCase__ , last_epoch=UpperCamelCase__ )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=-1 ):
"""simple docstring"""
def lr_lambda(UpperCamelCase__ ):
if current_step < num_warmup_steps:
return float(UpperCamelCase__ ) / float(max(1 , UpperCamelCase__ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 0.5 , UpperCamelCase__ = -1 ):
"""simple docstring"""
def lr_lambda(UpperCamelCase__ ):
if current_step < num_warmup_steps:
return float(UpperCamelCase__ ) / float(max(1 , UpperCamelCase__ ) )
A__ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(UpperCamelCase__ ) * 2.0 * progress )) )
return LambdaLR(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 1 , UpperCamelCase__ = -1 ):
"""simple docstring"""
def lr_lambda(UpperCamelCase__ ):
if current_step < num_warmup_steps:
return float(UpperCamelCase__ ) / float(max(1 , UpperCamelCase__ ) )
A__ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(UpperCamelCase__ ) * progress) % 1.0) )) )
return LambdaLR(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1E-7 , UpperCamelCase__=1.0 , UpperCamelCase__=-1 ):
"""simple docstring"""
A__ = optimizer.defaults['lr']
if not (lr_init > lr_end):
raise ValueError(F'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''' )
def lr_lambda(UpperCamelCase__ ):
if current_step < num_warmup_steps:
return float(UpperCamelCase__ ) / float(max(1 , UpperCamelCase__ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
A__ = lr_init - lr_end
A__ = num_training_steps - num_warmup_steps
A__ = 1 - (current_step - num_warmup_steps) / decay_steps
A__ = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = 1 , UpperCamelCase__ = 1.0 , UpperCamelCase__ = -1 , ):
"""simple docstring"""
A__ = SchedulerType(UpperCamelCase__ )
A__ = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(UpperCamelCase__ , last_epoch=UpperCamelCase__ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(UpperCamelCase__ , step_rules=UpperCamelCase__ , last_epoch=UpperCamelCase__ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(UpperCamelCase__ , num_warmup_steps=UpperCamelCase__ , last_epoch=UpperCamelCase__ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
UpperCamelCase__ , num_warmup_steps=UpperCamelCase__ , num_training_steps=UpperCamelCase__ , num_cycles=UpperCamelCase__ , last_epoch=UpperCamelCase__ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
UpperCamelCase__ , num_warmup_steps=UpperCamelCase__ , num_training_steps=UpperCamelCase__ , power=UpperCamelCase__ , last_epoch=UpperCamelCase__ , )
return schedule_func(
UpperCamelCase__ , num_warmup_steps=UpperCamelCase__ , num_training_steps=UpperCamelCase__ , last_epoch=UpperCamelCase__ )
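# Illustrative usage sketch (added; not part of the module above). Assumes
# torch is installed; the model is a hypothetical placeholder.
#
#     import torch
#
#     model = torch.nn.Linear(4, 4)  # placeholder model, for illustration only
#     optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
#     lr_scheduler = get_scheduler(
#         "cosine", optimizer, num_warmup_steps=100, num_training_steps=1000
#     )
#     for step in range(1000):
#         optimizer.step()
#         lr_scheduler.step()  # advances the LambdaLR multiplier once per step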
| 154 | """simple docstring"""
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # subclasses override this with the actual algorithm
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]

        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
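    # Note (added): in the 4-vertex demo network above, the only augmenting
    # path is 0 -> 1 -> 2 -> 3 with bottleneck capacity 6, so this run should
    # print "maximum flow is 6".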
| 154 | 1 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = tf.convert_to_tensor(lowerCamelCase )
UpperCAmelCase__ = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = tf.convert_to_tensor(lowerCamelCase )
UpperCAmelCase__ = tf.cast(math.pi , x.dtype )
UpperCAmelCase__ = tf.cast(0.044715 , x.dtype )
UpperCAmelCase__ = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(lowerCamelCase , 3 )) ))
return x * cdf
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = tf.convert_to_tensor(lowerCamelCase )
return x * tf.tanh(tf.math.softplus(lowerCamelCase ) )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = tf.convert_to_tensor(lowerCamelCase )
UpperCAmelCase__ = tf.cast(0.044715 , x.dtype )
UpperCAmelCase__ = tf.cast(0.7978845608 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = tf.convert_to_tensor(lowerCamelCase )
UpperCAmelCase__ = tf.cast(1.702 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def a_ ( lowerCamelCase ):
return tf.clip_by_value(_gelu(lowerCamelCase ) , -1_0 , 1_0 )
def a_ ( lowerCamelCase , lowerCamelCase=-1 ):
UpperCAmelCase__ , UpperCAmelCase__ = tf.split(lowerCamelCase , 2 , axis=lowerCamelCase )
return a * tf.math.sigmoid(lowerCamelCase )
if version.parse(tf.version.VERSION) >= version.parse('2.4'):
def a_ ( lowerCamelCase ):
return tf.keras.activations.gelu(lowerCamelCase , approximate=lowerCamelCase )
lowerCAmelCase__ : str = tf.keras.activations.gelu
lowerCAmelCase__ : Tuple = approximate_gelu_wrap
else:
lowerCAmelCase__ : str = _gelu
lowerCAmelCase__ : Optional[Any] = _gelu_new
lowerCAmelCase__ : Any = {
'gelu': gelu,
'gelu_10': gelu_aa,
'gelu_fast': gelu_fast,
'gelu_new': gelu_new,
'glu': glu,
'mish': mish,
'quick_gelu': quick_gelu,
'relu': tf.keras.activations.relu,
'sigmoid': tf.keras.activations.sigmoid,
'silu': tf.keras.activations.swish,
'swish': tf.keras.activations.swish,
'tanh': tf.keras.activations.tanh,
}
def a_ ( lowerCamelCase ):
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(f'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
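# Illustrative usage sketch (added; not part of the original module). Assumes
# TensorFlow is installed.
#
#     act = get_tf_activation("gelu_fast")
#     x = tf.constant([-1.0, 0.0, 1.0])
#     y = act(x)  # elementwise activation, same shape as x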
| 98 |
import inspect

import jax
import jax.lax as lax
import jax.numpy as jnp

from ..utils import add_start_docstrings
from ..utils.logging import get_logger


logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"


class FlaxLogitsProcessor:
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """simple docstring"""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """simple docstring"""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        """simple docstring"""
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores


class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores


class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores


class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores


class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores


class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")

        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)

        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)

        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores


class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores


class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True and penultimate_was_timestamp,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
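# Illustrative usage sketch (added; not part of the original module): builds a
# processor list and applies it to dummy logits. Shapes and token ids are made
# up for illustration.
#
#     processors = FlaxLogitsProcessorList(
#         [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=50)]
#     )
#     input_ids = jnp.zeros((2, 5), dtype=jnp.int32)  # dummy prompt
#     scores = jnp.zeros((2, 1000))                   # dummy logits over a 1000-token vocab
#     scores = processors(input_ids, scores, cur_len=5)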
| 222 | 0 |
"""simple docstring"""
import math
def lowercase_ ( _lowerCamelCase: float , _lowerCamelCase: float ) -> float:
'''simple docstring'''
if (
not isinstance(_lowerCamelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * power_factor
def lowercase_ ( _lowerCamelCase: float , _lowerCamelCase: float ) -> float:
'''simple docstring'''
if (
not isinstance(_lowerCamelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * math.sqrt(1 - power_factor**2 )
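# Illustrative example (added): for an apparent power of 100 VA at a power
# factor of 0.9, real_power(100, 0.9) gives 90.0 W and reactive_power(100, 0.9)
# gives roughly 43.59 VAR, i.e. 100 * sqrt(1 - 0.81).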
if __name__ == "__main__":
    import doctest

    doctest.testmod() | 362 | """simple docstring"""
def solution(n: int = 4000000) -> int:
    '''simple docstring'''
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
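# Alternative sketch (added): every third Fibonacci number is even, and the
# even terms satisfy E(k) = 4 * E(k - 1) + E(k - 2), so the list can be
# avoided entirely.
#
#     def solution_recurrence(n: int = 4000000) -> int:
#         a, b = 2, 8  # first two even Fibonacci numbers
#         total = 0
#         while a <= n:
#             total += a
#             a, b = b, 4 * b + a
#         return total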
if __name__ == "__main__":
print(F"""{solution() = }""") | 64 | 0 |