| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86 to 54.5k) | int64 (0 to 371) | string (lengths 87 to 49.2k) | int64 (0 to 349) | int64 (0 or 1) |
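Each row pairs a `code` string with a `style_context` string, two integer style-class ids, and a binary `label`. As a minimal sketch (assuming the split is hosted on the Hugging Face Hub; the dataset identifier below is a placeholder, not the real one), a row could be inspected with the `datasets` library:

from datasets import load_dataset

# Hypothetical identifier; substitute the dataset's actual Hub path.
ds = load_dataset("org/code-style-pairs", split="train")
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the `code` column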
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    count = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        count += 1
        if count % interval == 0:
            end = time.time()
            logger.info(f"{count} examples processed. - {(end - start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
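A quick read-back check for the pickle written above; this is a sketch that assumes the default `--dump_file` and `--tokenizer_name` arguments, so the file name is illustrative:

import pickle

# File name follows the `{dump_file}.{tokenizer_name}.pickle` pattern used by the script.
with open("data/dump.bert-base-uncased.pickle", "rb") as f:
    sequences = pickle.load(f)

print(f"{len(sequences)} sequences loaded")
print(sequences[0][:10], sequences[0].dtype)  # uint16 ids when the vocabulary fits in 2**16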
--- code_codestyle: 32 ---
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
    all_generative_model_classes = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'''feature-extraction''': TFFlaubertModel,
'''fill-mask''': TFFlaubertWithLMHeadModel,
'''question-answering''': TFFlaubertForQuestionAnsweringSimple,
'''text-classification''': TFFlaubertForSequenceClassification,
'''token-classification''': TFFlaubertForTokenClassification,
'''zero-shot''': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)
    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)
    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)
    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)
    def test_flaubert_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)
    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
# compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
--- style_context_codestyle: 204 | label: 0 ---
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
MAPPING_TEXT_ENCODER_PRENET = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
MAPPING_SPEECH_DECODER_PRENET = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
MAPPING_TEXT_DECODER_PRENET = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
MAPPING_TEXT_DECODER_POSTNET = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
MAPPING_ENCODER = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
MAPPING_DECODER = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def set_recursively(hf_model, key, value, full_name, weight_type):
    # Walk the attribute path (e.g. "speecht5.encoder.prenet.embed_tokens") down to the module.
    hf_pointer = hf_model
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
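# Illustrative check (not part of the original script): for
# name = "encoder.layers.3.norm_k.weight" and
# ignore_keys = ["encoder.layers.*.norm_k.weight"], the ".*." branch splits the
# pattern into prefix "encoder.layers" and suffix "norm_k.weight"; both occur in
# the name, so should_ignore returns True.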
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
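# Illustrative invocation (the script name and every path below are placeholders,
# not values taken from this file):
#   python convert_speecht5_checkpoint.py --task t2s \
#       --checkpoint_path ./speecht5_tts.pt --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_hf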
--- code_codestyle: 361 ---

import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    mode = "token-classification"
    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Any:
__UpperCamelCase :str = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
__UpperCamelCase :Union[str, Any] = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
__UpperCamelCase :Dict = self(**__lowercase)
__UpperCamelCase :str = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :List[Any] = self.hparams
for mode in ["train", "dev", "test"]:
__UpperCamelCase :int = self._feature_file(__lowercase)
if os.path.exists(__lowercase) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __lowercase)
__UpperCamelCase :Any = torch.load(__lowercase)
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir)
__UpperCamelCase :Any = self.token_classification_task.read_examples_from_file(args.data_dir , __lowercase)
__UpperCamelCase :Union[str, Any] = self.token_classification_task.convert_examples_to_features(
__lowercase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet''']) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__lowercase , pad_on_left=bool(self.config.model_type in ['''xlnet''']) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , __lowercase)
torch.save(__lowercase , __lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase = False) -> DataLoader:
__UpperCamelCase :Tuple = self._feature_file(__lowercase)
logger.info('''Loading features from cached file %s''' , __lowercase)
__UpperCamelCase :str = torch.load(__lowercase)
__UpperCamelCase :int = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
__UpperCamelCase :Optional[Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
if features[0].token_type_ids is not None:
__UpperCamelCase :str = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
else:
__UpperCamelCase :Union[str, Any] = torch.tensor([0 for f in features] , dtype=torch.long)
# HACK(we will not use this anymore soon)
__UpperCamelCase :int = torch.tensor([f.label_ids for f in features] , dtype=torch.long)
return DataLoader(
TensorDataset(__lowercase , __lowercase , __lowercase , __lowercase) , batch_size=__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Dict:
"""Compute validation""" ""
__UpperCamelCase :int = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
__UpperCamelCase :Any = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
__UpperCamelCase :Any = self(**__lowercase)
__UpperCamelCase , __UpperCamelCase :Tuple = outputs[:2]
__UpperCamelCase :List[str] = logits.detach().cpu().numpy()
__UpperCamelCase :List[str] = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)

        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
def UpperCamelCase__ ( self , __lowercase) -> int:
# when stable
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[Any] = self._eval_end(__lowercase)
__UpperCamelCase :Tuple = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase__ ( self , __lowercase) -> int:
# updating to test_epoch_end instead of deprecated test_end
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Optional[int] = self._eval_end(__lowercase)
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
__UpperCamelCase :Optional[Any] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCamelCase__ ( __lowercase , __lowercase) -> Union[str, Any]:
# Add NER specific options
BaseTransformer.add_model_specific_args(__lowercase , __lowercase)
parser.add_argument(
'''--task_type''' , default='''NER''' , type=__lowercase , help='''Task type to fine tune in training (e.g. NER, POS, etc)''')
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__lowercase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=__lowercase , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__lowercase , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''')
return parser
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__lowercase = NERTransformer.add_model_specific_args(parser, os.getcwd())
__lowercase = parser.parse_args()
__lowercase = NERTransformer(args)
__lowercase = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__lowercase = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
__lowercase = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
--- style_context_codestyle: 105 | label: 0 ---
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
__A : Tuple = {
'''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}
def lowercase ( __snake_case : str = "dhaka" , __snake_case : int = 5 ):
lowercase_ : Dict = min(__snake_case , 5_0 ) # Prevent abuse!
lowercase_ : Optional[int] = {
'''q''': query,
'''tbm''': '''isch''',
'''hl''': '''en''',
'''ijn''': '''0''',
}
lowercase_ : int = requests.get('''https://www.google.com/search''' , params=__snake_case , headers=__snake_case )
lowercase_ : List[Any] = BeautifulSoup(html.text , '''html.parser''' )
lowercase_ : Tuple = ''''''.join(
re.findall(r'''AF_initDataCallback\(([^<]+)\);''' , str(soup.select('''script''' ) ) ) )
lowercase_ : Union[str, Any] = json.dumps(__snake_case )
lowercase_ : Optional[int] = json.loads(__snake_case )
lowercase_ : Dict = re.findall(
r'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''' , __snake_case , )
if not matched_google_image_data:
return 0
lowercase_ : Optional[int] = re.sub(
r'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''' , '''''' , str(__snake_case ) , )
lowercase_ : Optional[Any] = re.findall(
r'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''' , __snake_case , )
for index, fixed_full_res_image in enumerate(__snake_case ):
if index >= max_images:
return index
lowercase_ : List[str] = bytes(__snake_case , '''ascii''' ).decode(
'''unicode-escape''' )
lowercase_ : Any = bytes(__snake_case , '''ascii''' ).decode(
'''unicode-escape''' )
lowercase_ : Any = urllib.request.build_opener()
lowercase_ : List[str] = [
(
'''User-Agent''',
'''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
)
]
urllib.request.install_opener(__snake_case )
lowercase_ : Dict = F'''query_{query.replace(' ' , '_' )}'''
if not os.path.exists(__snake_case ):
os.makedirs(__snake_case )
urllib.request.urlretrieve( # noqa: S310
__snake_case , F'''{path_name}/original_size_img_{index}.jpg''' )
return index
if __name__ == "__main__":
try:
__A : Union[str, Any] = download_images_from_google_query(sys.argv[1])
print(F"""{image_count} images were downloaded to disk.""")
except IndexError:
print('''Please provide a search term.''')
raise
--- code_codestyle: 33 ---
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
def lowerCAmelCase (self : str , snake_case_ : Tuple , snake_case_ : List[str] , snake_case_ : int , snake_case_ : int , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : Any ):
__a : Any = MobileBertModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : List[str] = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )
__a : Optional[Any] = model(snake_case_ , token_type_ids=snake_case_ )
__a : Optional[Any] = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase (self : Any , snake_case_ : Dict , snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : Tuple , snake_case_ : str , snake_case_ : List[Any] ):
__a : str = MobileBertForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : Tuple = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase (self : Tuple , snake_case_ : Any , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Dict ):
__a : Optional[Any] = MobileBertForNextSentencePrediction(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : int = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase (self : Any , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Any , snake_case_ : Union[str, Any] , snake_case_ : Dict , snake_case_ : Optional[Any] ):
__a : str = MobileBertForPreTraining(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : Union[str, Any] = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , next_sentence_label=snake_case_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase (self : Dict , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : Dict , snake_case_ : int , snake_case_ : int , snake_case_ : str , snake_case_ : str ):
__a : str = MobileBertForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : Optional[Any] = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase (self : Optional[int] , snake_case_ : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : str , snake_case_ : Any , snake_case_ : Tuple , snake_case_ : Optional[int] ):
__a : Any = self.num_labels
__a : Union[str, Any] = MobileBertForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
__a : Tuple = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase (self : List[Any] , snake_case_ : Optional[int] , snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Dict , snake_case_ : Optional[int] ):
__a : Union[str, Any] = self.num_labels
__a : str = MobileBertForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : Any = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase (self : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Any , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : Any , snake_case_ : Dict , snake_case_ : Union[str, Any] ):
__a : Union[str, Any] = self.num_choices
__a : List[str] = MobileBertForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Any = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
def lowerCAmelCase (self : Union[str, Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase (self : Optional[Any] ):
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*snake_case_ )
def lowerCAmelCase (self : str ):
__a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case_ )
def lowerCAmelCase (self : Tuple ):
__a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case_ )
def lowerCAmelCase (self : Dict ):
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case_ )
def lowerCAmelCase (self : int ):
__a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case_ )
def lowerCAmelCase (self : List[Any] ):
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case_ )
def lowerCAmelCase (self : int ):
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case_ )
def lowerCAmelCase (self : Tuple ):
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case_ )
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
@slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
[
[
[-2.473_6526E07, 8.269_1656E04, 1.652_1838E05],
[-5.754_1704E-01, 3.905_6022E00, 4.401_1507E00],
[2.604_7359E00, 1.567_7652E00, -1.732_4188E-01],
]
            ],
            device=torch_device,
        )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
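        # Illustrative numbers (not from the test): with an expected value of 1.0e8 and a
        # computed value of 1.00005e8, the ratio is ~0.99995, comfortably inside
        # [1 - TOLERANCE, 1 + TOLERANCE] even though the absolute gap is 5e3.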
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
--- style_context_codestyle: 216 | label: 0 ---
def solution(n: int = 1000) -> int:
    """Return the index of the first term in the Fibonacci sequence to contain n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
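A quick hand-checked example for `solution` (the sequence runs 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, so under this indexing the 12th term is the first with three digits):

assert solution(3) == 12  # 144 is the first three-digit Fibonacci term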
--- code_codestyle: 159 ---
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = (
    "Total COVID-19 cases in the world: {}\n"
    "Total deaths due to COVID-19 in the world: {}\n"
    "Total COVID-19 patients recovered in the world: {}"
)
print(fmt.format(*covid_stats()))
--- style_context_codestyle: 159 | label: 1 ---
import argparse
import logging
import os

import datasets
import tensorflow as tf
from transformers import AutoTokenizer

logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=lowercase , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=lowercase , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=lowercase , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=lowercase , default=1000 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=lowercase , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=lowercase , type=lowercase , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=lowercase , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=lowercase , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
_a = parser.parse_args()
return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
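# Hedged round-trip sketch (added for illustration): each serialized record produced
# by get_serialized_examples can be decoded again with tf.io.parse_single_example.
# The feature spec mirrors the fixed-length int64 lists written above; max_length
# should match args.max_length since group_texts() emits fixed-size chunks.
def parse_serialized_example(serialized_example, max_length):
    feature_spec = {
        "input_ids": tf.io.FixedLenFeature([max_length], dtype=tf.int64),
        "attention_mask": tf.io.FixedLenFeature([max_length], dtype=tf.int64),
    }
    return tf.io.parse_single_example(serialized_example, feature_spec)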
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
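# Hedged usage sketch (added): the resulting shards can be streamed back for TPU
# training with tf.data. The glob pattern assumes the local (non-GCS) layout that
# main() above creates with the default --output_dir and --split values:
#
#   filenames = tf.io.gfile.glob("tf-tpu/train/dataset-*.tfrecord")
#   raw_dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=tf.data.AUTOTUNE)
#   dataset = raw_dataset.map(lambda rec: parse_serialized_example(rec, max_length=512))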
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu"))

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
'''simple docstring'''
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token(idx):
    token = []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
return token
def final():
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
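# Hedged illustration (added): each entry built by embeddings(), attention(),
# cls_token() and final() is a (huggingface_name, original_name) pair, which the
# conversion applies as a plain key remapping over the original checkpoint:
def rename_state_dict(original_state_dict, rename_pairs):
    remapped = OrderedDict()
    for hf_key, original_key in rename_pairs:
        remapped[hf_key] = original_state_dict[original_key]
    return remapped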
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
A__ : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=3_8_4,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images."
    )
    parser.add_argument("-n", "--images_num", type=int, default=4, help="How much images to generate.")
    parser.add_argument("-s", "--seed", type=int, default=42, help="Seed for random process.")
    parser.add_argument("-ci", "--cuda_id", type=int, default=0, help="cuda_id.")
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
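# Hedged usage sketch (added): image_grid depends only on PIL, so it can be
# exercised without a diffusion model using solid-colour placeholder tiles.
def _demo_image_grid():
    tiles = [Image.new("RGB", (64, 64), color=c) for c in ("red", "green", "blue", "white")]
    demo_grid = image_grid(tiles, rows=2, cols=2)
    assert demo_grid.size == (128, 128)  # 2x2 tiles of 64x64 pixels each
    return demo_grid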
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
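# Hedged usage sketch (added, not part of the original file): how the
# `feed_forward_proj` string is decomposed by the constructor above.
if __name__ == "__main__":
    demo_config = UMT5Config(feed_forward_proj="gated-silu")
    assert demo_config.is_gated_act is True
    assert demo_config.dense_act_fn == "silu"
    # The "gated-gelu" default is special-cased to the gelu_new activation:
    assert UMT5Config().dense_act_fn == "gelu_new"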
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    scores, indices = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in indices[0]]
    return nn_examples
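# Hedged aside (added): the dense lookup above is a plain FAISS inner-product
# search. A self-contained toy version looks like this (names are illustrative):
#
#   import faiss
#   import numpy as np
#
#   xb = np.random.rand(100, 128).astype("float32")   # 100 candidate embeddings
#   xb /= np.linalg.norm(xb, axis=1, keepdims=True)   # normalise -> IP equals cosine
#   index = faiss.IndexFlatIP(128)
#   index.add(xb)
#   scores, ids = index.search(xb[:1], 5)             # top-5 scores and row ids
#   assert ids[0][0] == 0                             # a unit vector matches itself best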
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
UpperCAmelCase__ : List[Any] = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
UpperCAmelCase__ : Optional[Any] = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
UpperCAmelCase__ : str = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
UpperCAmelCase__ : str = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
UpperCAmelCase__ : Optional[int] = st.sidebar.checkbox('Demo options')
if demo_options:
UpperCAmelCase__ : Any = st.sidebar.selectbox(
'',
action_list,
index=3,
)
UpperCAmelCase__ : List[str] = action_list.index(action_st)
UpperCAmelCase__ : Optional[Any] = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
UpperCAmelCase__ : List[Any] = show_type == 'Show full text of passages'
else:
UpperCAmelCase__ : Dict = 3
UpperCAmelCase__ : str = True
UpperCAmelCase__ : Optional[Any] = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
UpperCAmelCase__ : List[str] = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
UpperCAmelCase__ : Optional[int] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
UpperCAmelCase__ : int = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
UpperCAmelCase__ : Tuple = 'wiki40b'
UpperCAmelCase__ : List[Any] = 'dense'
UpperCAmelCase__ : Tuple = 'beam'
UpperCAmelCase__ : Any = 2
UpperCAmelCase__ : Dict = 64
UpperCAmelCase__ : Any = 256
UpperCAmelCase__ : int = None
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : Optional[int] = st.sidebar.checkbox('Generation options')
if generate_options:
UpperCAmelCase__ : Any = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
UpperCAmelCase__ : Optional[int] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
UpperCAmelCase__ : int = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
UpperCAmelCase__ : str = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
UpperCAmelCase__ : Tuple = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
UpperCAmelCase__ : List[Any] = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
UpperCAmelCase__ : Union[str, Any] = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
UpperCAmelCase__ : Optional[int] = None
# start main text
UpperCAmelCase__ : Any = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
UpperCAmelCase__ : List[Any] = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
UpperCAmelCase__ : Any = st.text_input('Enter your question here:', '')
else:
UpperCAmelCase__ : int = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
UpperCAmelCase__ ,UpperCAmelCase__ : Tuple = make_support(question, source=wiki_source, method='dense', n_results=10)
UpperCAmelCase__ ,UpperCAmelCase__ : Optional[Any] = make_support(question, source=wiki_source, method='sparse', n_results=10)
UpperCAmelCase__ : Dict = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
UpperCAmelCase__ : str = support_list[:10]
UpperCAmelCase__ : str = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
UpperCAmelCase__ ,UpperCAmelCase__ : List[Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
UpperCAmelCase__ ,UpperCAmelCase__ : Optional[Any] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
UpperCAmelCase__ : Any = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
UpperCAmelCase__ : Tuple = res[1].strip()
if sec_titles == "":
UpperCAmelCase__ : Optional[int] = '[{}]({})'.format(res[0], wiki_url)
else:
UpperCAmelCase__ : int = sec_titles.split(' & ')
UpperCAmelCase__ : Union[str, Any] = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
UpperCAmelCase__ : Union[str, Any] = find_nearest_training(question)
UpperCAmelCase__ : int = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
UpperCAmelCase__ : Tuple = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
UpperCAmelCase__ : Any = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
a : Tuple = pipeline(
"document-question-answering" , model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
a : Optional[int] = INVOICE_URL
a : str = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , "" ) ) )
a : Union[str, Any] = "What is the placebo?"
a : Dict = [
{
"image": load_image(lowerCAmelCase__ ),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
a : Tuple = dqa_pipeline(lowerCAmelCase__ , top_k=2 )
self.assertEqual(
lowerCAmelCase__ , [
[
{"score": ANY(lowerCAmelCase__ ), "answer": ANY(lowerCAmelCase__ ), "start": ANY(lowerCAmelCase__ ), "end": ANY(lowerCAmelCase__ )},
{"score": ANY(lowerCAmelCase__ ), "answer": ANY(lowerCAmelCase__ ), "start": ANY(lowerCAmelCase__ ), "end": ANY(lowerCAmelCase__ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def __a ( self ) -> List[Any]:
a : List[Any] = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
a : Dict = INVOICE_URL
a : List[str] = "How many cats are there?"
a : Tuple = [
{"score": 0.0_001, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
a : Optional[int] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
a : Optional[int] = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
a : List[Any] = "./tests/fixtures/tests_samples/COCO/000000039769.png"
a : Any = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
# We can optionnally pass directly the words and bounding boxes
a : Optional[int] = "./tests/fixtures/tests_samples/COCO/000000039769.png"
a : Tuple = []
a : Optional[int] = []
a : List[str] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , words=lowerCAmelCase__ , boxes=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __a ( self ) -> Tuple:
a : int = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
a : List[str] = INVOICE_URL
a : List[Any] = "What is the invoice number?"
a : int = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
] , )
a : str = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
] , )
a : Any = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __a ( self ) -> Optional[int]:
a : List[str] = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
a : Optional[Any] = INVOICE_URL
a : Tuple = "What is the invoice number?"
a : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
] , )
a : str = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
] , )
a : Tuple = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __a ( self ) -> str:
a : Optional[int] = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=lowerCAmelCase__ )
a : int = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=lowerCAmelCase__ , revision="3dc6de3" , )
a : List[Any] = INVOICE_URL
a : Tuple = "What is the invoice number?"
a : List[str] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
a : Dict = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
a : List[Any] = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
a : Dict = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , "" ) ) )
# This model should also work if `image` is set to None
a : Optional[Any] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __a ( self ) -> Tuple:
a : int = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=lowerCAmelCase__ )
a : Tuple = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=lowerCAmelCase__ , revision="3dc6de3" , max_seq_len=50 , )
a : List[str] = INVOICE_URL
a : Union[str, Any] = "What is the invoice number?"
a : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
] , )
a : List[str] = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
a : List[Any] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , "" ) ) )
# This model should also work if `image` is set to None
a : Any = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
def __a ( self ) -> int:
a : Tuple = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
a : Optional[Any] = INVOICE_URL
a : Tuple = "What is the invoice number?"
a : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def __a ( self ) -> int:
pass
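# Hedged aside (added): LayoutLM-style document QA models expect word boxes
# normalised to a 0-1000 coordinate grid, which is what apply_tesseract produces
# internally from pixel coordinates:
def normalize_box(box, width, height):
    x0, y0, x1, y1 = box  # pixel coordinates
    return [
        int(1000 * x0 / width),
        int(1000 * y0 / height),
        int(1000 * x1 / width),
        int(1000 * y1 / height),
    ]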
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
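# Hedged usage sketch (added): instantiating the configuration with its defaults
# reproduces the facebook/xlm-roberta-xl architecture hyper-parameters.
if __name__ == "__main__":
    demo_config = XLMRobertaXLConfig()
    assert demo_config.hidden_size == 2560
    assert demo_config.num_hidden_layers == 36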
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 37 | 1 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
SCREAMING_SNAKE_CASE :Optional[Any] = HfArgumentParser(InitializationArguments)
SCREAMING_SNAKE_CASE :Dict = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
SCREAMING_SNAKE_CASE :Union[str, Any] = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
SCREAMING_SNAKE_CASE :int = {
'''vocab_size''': len(tokenizer),
'''scale_attn_by_inverse_layer_idx''': True,
'''reorder_and_upcast_attn''': True,
}
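# Minimal sanity sketch (assumption: GPT2Config exposes both stability flags
# used above; built directly so no checkpoint download is needed):
from transformers import GPT2Config

_sketch_config = GPT2Config(scale_attn_by_inverse_layer_idx=True, reorder_and_upcast_attn=True)
assert _sketch_config.scale_attn_by_inverse_layer_idx and _sketch_config.reorder_and_upcast_attn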
# Load model config (GPT-2 large in this case)
SCREAMING_SNAKE_CASE :List[Any] = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
SCREAMING_SNAKE_CASE :Optional[int] = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 159 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE :Union[str, Any] = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :List[Any] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
SCREAMING_SNAKE_CASE :List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
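# Sketch of the lazy-import pattern above (as used in upstream transformers,
# where the module replaces its own entry in sys.modules):
#   import sys
#   sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
#   # Attribute access (e.g. module.CanineModel) then triggers the real import
#   # of .modeling_canine on first use, keeping the top-level import cheap.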
| 159 | 1 |
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_a = logging.getLogger()
def __a ( ):
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
parser.add_argument("-f" )
UpperCAmelCase_ : Dict = parser.parse_args()
return args.f
class A_ (lowercase__ ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = logging.StreamHandler(sys.stdout )
logger.addHandler(lowercase_ )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , "run_glue_deebert.py" )
with patch.object(lowercase_ , "argv" , lowercase_ ):
UpperCAmelCase_ : List[str] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowercase_ , 0.6_66 )
@slow
@require_torch_non_multi_gpu
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(lowercase_ )
UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowercase_ )
UpperCAmelCase_ : Dict = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowercase_ )
| 23 |
"""simple docstring"""
_a = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
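# Sanity sketch for the lookup table above: entry i holds the sum of the
# squares of i's decimal digits, for every 5-digit chunk 0 .. 99_999.
assert _a[99_999] == 5 * 9**2  # 405
assert _a[10] == 1**2 + 0**2  # 1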
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Optional[int] = 0
while number:
    # Increased speed slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# There are 2 chains formed.
# One ends with 89; seeding its member 58 first means the fewest iterations
# are needed for all of that chain's members to be checked.
# The other ends with 1 and has only the single element 1.
# So 58 and 1 are seeded at the start (a small sketch of the two endpoints follows below).
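# Illustrative sketch of the two chain endpoints (hypothetical helper, not part
# of the optimized solution below):
def _demo_chain_endpoint(n: int) -> int:
    """Iterate the digit-square map from ``n`` until it reaches 1 or 89."""
    while n not in (1, 89):
        n = sum(int(d) ** 2 for d in str(n))
    return n

assert _demo_chain_endpoint(44) == 1  # 44 -> 32 -> 13 -> 10 -> 1
assert _demo_chain_endpoint(85) == 89  # 85 -> 89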
# Changed from a dictionary to an array to speed up the solution.
_a = [None] * 10_000_000
_a = True
_a = False
def __a ( __lowerCamelCase ):
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
UpperCAmelCase_ : Dict = chain(next_number(__lowerCamelCase ) )
UpperCAmelCase_ : List[str] = number_chain
while number < 1000_0000:
UpperCAmelCase_ : List[Any] = number_chain
number *= 10
return number_chain
def __a ( __lowerCamelCase = 1000_0000 ):
for i in range(1, __lowerCamelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 23 | 1 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class lowercase_ ( lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = BartphoTokenizer
__snake_case = False
__snake_case = True
def __lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
super().setUp()
a = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
a = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
a = {'''unk_token''': '''<unk>'''}
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] )
with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""" )
a = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : str , **__UpperCAmelCase : Any ) ->List[str]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __lowerCAmelCase ( self : str , __UpperCAmelCase : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
a = '''This is a là test'''
a = '''This is a<unk><unk> test'''
return input_text, output_text
def __lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
a = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
a = '''This is a là test'''
a = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
a = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
a = tokens + [tokenizer.unk_token]
a = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
| 0 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def _a ( a :List[Any] ) -> Optional[int]:
a = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def _a ( a :List[Any] , a :Optional[int] ) -> Dict:
a = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def _a ( a :Any ) -> List[Any]:
a = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') )
return token
def _a ( ) -> Optional[int]:
a = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def _a ( a :Tuple , a :Optional[int] , a :List[Any] , a :Union[str, Any] ) -> Optional[int]:
a = '''imagenet-1k-id2label.json'''
a = 1_000
a = '''huggingface/label-files'''
a = num_labels
a = json.load(open(cached_download(hf_hub_url(a , a , repo_type='''dataset''' ) ) , '''r''' ) )
    a = {int(k ): v for k, v in idalabel.items()}
a = idalabel
a = {v: k for k, v in idalabel.items()}
    a = CvtConfig(num_labels=a , idalabel=a , labelaid=a )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
a = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
a = [1, 4, 16]
    # For wide CvT (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
a = [2, 2, 20]
a = [3, 12, 16]
a = [192, 768, 1_024]
a = CvtForImageClassification(a )
a = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
a = image_size
a = torch.load(a , map_location=torch.device('''cpu''' ) )
a = OrderedDict()
a = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
a = list_of_state_dict + cls_token(a )
a = list_of_state_dict + embeddings(a )
for cnt in range(config.depth[idx] ):
a = list_of_state_dict + attention(a , a )
a = list_of_state_dict + final()
for gg in list_of_state_dict:
print(a )
for i in range(len(a ) ):
a = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(a )
model.save_pretrained(a )
image_processor.save_pretrained(a )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=R"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCAmelCase__ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
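# Hypothetical invocation of the conversion script above (script name and
# checkpoint path are placeholders):
#   python convert_cvt_checkpoint.py --cvt_model cvt-w24 --image_size 384 \
#       --cvt_file_name ./CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-384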
| 0 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase__ = UNetaDModel(
sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
return model
@property
def lowercase_ ( self : Any ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase__ = UNetaDConditionModel(
sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10, )
return model
@property
def lowercase_ ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase__ = AutoencoderKL(
sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), )
UpperCamelCase__ = UNetaDModel(
sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
return vqvae, unet
@slow
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ = Mel(
x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0], )
UpperCamelCase__ = DDPMScheduler()
UpperCamelCase__ = AudioDiffusionPipeline(vqvae=UpperCamelCase__, unet=self.dummy_unet, mel=UpperCamelCase__, scheduler=UpperCamelCase__ )
UpperCamelCase__ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
UpperCamelCase__ = torch.Generator(device=UpperCamelCase__ ).manual_seed(42 )
UpperCamelCase__ = pipe(generator=UpperCamelCase__, steps=4 )
UpperCamelCase__ = output.audios[0]
UpperCamelCase__ = output.images[0]
UpperCamelCase__ = torch.Generator(device=UpperCamelCase__ ).manual_seed(42 )
UpperCamelCase__ = pipe(generator=UpperCamelCase__, steps=4, return_dict=UpperCamelCase__ )
UpperCamelCase__ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
UpperCamelCase__ = np.frombuffer(image.tobytes(), dtype="uint8" )[:10]
UpperCamelCase__ = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8" )[:10]
UpperCamelCase__ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
UpperCamelCase__ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0], )
UpperCamelCase__ = DDIMScheduler()
UpperCamelCase__ = self.dummy_vqvae_and_unet
UpperCamelCase__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=UpperCamelCase__, scheduler=UpperCamelCase__ )
UpperCamelCase__ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
np.random.seed(0 )
UpperCamelCase__ = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
UpperCamelCase__ = torch.Generator(device=UpperCamelCase__ ).manual_seed(42 )
UpperCamelCase__ = pipe(raw_audio=UpperCamelCase__, generator=UpperCamelCase__, start_step=5, steps=10 )
UpperCamelCase__ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
UpperCamelCase__ = np.frombuffer(image.tobytes(), dtype="uint8" )[:10]
UpperCamelCase__ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
UpperCamelCase__ = self.dummy_unet_condition
UpperCamelCase__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0], unet=UpperCamelCase__, mel=UpperCamelCase__, scheduler=UpperCamelCase__ )
UpperCamelCase__ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
np.random.seed(0 )
UpperCamelCase__ = torch.rand((1, 1, 10) )
UpperCamelCase__ = pipe(generator=UpperCamelCase__, encoding=UpperCamelCase__ )
UpperCamelCase__ = output.images[0]
UpperCamelCase__ = np.frombuffer(image.tobytes(), dtype="uint8" )[:10]
UpperCamelCase__ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = torch_device
UpperCamelCase__ = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
UpperCamelCase__ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
UpperCamelCase__ = torch.Generator(device=UpperCamelCase__ ).manual_seed(42 )
UpperCamelCase__ = pipe(generator=UpperCamelCase__ )
UpperCamelCase__ = output.audios[0]
UpperCamelCase__ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
UpperCamelCase__ = np.frombuffer(image.tobytes(), dtype="uint8" )[:10]
UpperCamelCase__ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 | 368 |
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( electron_conc : float , hole_conc : float , intrinsic_conc : float , ) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
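# Minimal usage sketch (mass-action law: electron_conc * hole_conc == intrinsic_conc**2;
# the concentrations below are made up for illustration):
assert SCREAMING_SNAKE_CASE__(25, 100, 0) == ("intrinsic_conc", 50.0)
assert SCREAMING_SNAKE_CASE__(0, 1_600, 200) == ("electron_conc", 25.0)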
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 | 0 |
'''simple docstring'''
def _A ( A__ = 1000 ):
    """Return the sum of the decimal digits of 2**A__."""
    __lowercase = 2**A__  # the power of two whose digits we sum
    __lowercase_r = 0  # running digit sum
    while __lowercase:
        __lowercase_r , __lowercase = __lowercase_r + __lowercase % 10, __lowercase // 10
    return __lowercase_r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 104 |
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
SCREAMING_SNAKE_CASE_ = getLogger(__name__)
SCREAMING_SNAKE_CASE_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 8 , _lowerCAmelCase = DEFAULT_DEVICE , _lowerCAmelCase=False , _lowerCAmelCase="summarization" , _lowerCAmelCase=None , **_lowerCAmelCase , ):
__lowerCAmelCase = Path(_lowerCAmelCase ).open("""w""" , encoding="""utf-8""" )
__lowerCAmelCase = str(_lowerCAmelCase )
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase ).to(_lowerCAmelCase )
if fpaa:
__lowerCAmelCase = model.half()
__lowerCAmelCase = AutoTokenizer.from_pretrained(_lowerCAmelCase )
logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
__lowerCAmelCase = time.time()
# update config with task specific params
use_task_specific_params(_lowerCAmelCase , _lowerCAmelCase )
if prefix is None:
__lowerCAmelCase = prefix or getattr(model.config , """prefix""" , """""" ) or """"""
for examples_chunk in tqdm(list(chunks(_lowerCAmelCase , _lowerCAmelCase ) ) ):
__lowerCAmelCase = [prefix + text for text in examples_chunk]
__lowerCAmelCase = tokenizer(_lowerCAmelCase , return_tensors="""pt""" , truncation=_lowerCAmelCase , padding="""longest""" ).to(_lowerCAmelCase )
__lowerCAmelCase = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **_lowerCAmelCase , )
__lowerCAmelCase = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
for hypothesis in dec:
fout.write(hypothesis + """\n""" )
fout.flush()
fout.close()
__lowerCAmelCase = int(time.time() - start_time ) # seconds
__lowerCAmelCase = len(_lowerCAmelCase )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def lowercase ():
return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""" )
def lowercase (_lowerCAmelCase=True ):
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""model_name""" , type=_lowerCAmelCase , help="""like facebook/bart-large-cnn,t5-base, etc.""" )
parser.add_argument("""input_path""" , type=_lowerCAmelCase , help="""like cnn_dm/test.source""" )
parser.add_argument("""save_path""" , type=_lowerCAmelCase , help="""where to save summaries""" )
parser.add_argument("""--reference_path""" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="""like cnn_dm/test.target""" )
parser.add_argument("""--score_path""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default="""metrics.json""" , help="""where to save metrics""" )
parser.add_argument("""--device""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default=_lowerCAmelCase , help="""cuda, cuda:1, cpu etc.""" )
parser.add_argument(
"""--prefix""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default=_lowerCAmelCase , help="""will be added to the begininng of src examples""" )
parser.add_argument("""--task""" , type=_lowerCAmelCase , default="""summarization""" , help="""used for task_specific_params + metrics""" )
parser.add_argument("""--bs""" , type=_lowerCAmelCase , default=8 , required=_lowerCAmelCase , help="""batch size""" )
parser.add_argument(
"""--n_obs""" , type=_lowerCAmelCase , default=-1 , required=_lowerCAmelCase , help="""How many observations. Defaults to all.""" )
parser.add_argument("""--fp16""" , action="""store_true""" )
parser.add_argument("""--dump-args""" , action="""store_true""" , help="""print the custom hparams with the results""" )
parser.add_argument(
"""--info""" , nargs="""?""" , type=_lowerCAmelCase , const=datetime_now() , help=(
"""use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."""
""" lang=en-ru. If no value is passed, the current datetime string will be used."""
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__lowerCAmelCase , __lowerCAmelCase = parser.parse_known_args()
__lowerCAmelCase = parse_numeric_n_bool_cl_kwargs(_lowerCAmelCase )
if parsed_args and verbose:
print(f"""parsed the following generate kwargs: {parsed_args}""" )
__lowerCAmelCase = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__lowerCAmelCase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=_lowerCAmelCase )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError("""Can't mix --fp16 and --device cpu""" )
__lowerCAmelCase = generate_summaries_or_translations(
_lowerCAmelCase , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **_lowerCAmelCase , )
if args.reference_path is None:
return {}
# Compute scores
__lowerCAmelCase = calculate_bleu if """translation""" in args.task else calculate_rouge
__lowerCAmelCase = [x.rstrip() for x in open(args.save_path ).readlines()]
__lowerCAmelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_lowerCAmelCase )]
__lowerCAmelCase = score_fn(_lowerCAmelCase , _lowerCAmelCase )
scores.update(_lowerCAmelCase )
if args.dump_args:
scores.update(_lowerCAmelCase )
if args.info:
__lowerCAmelCase = args.info
if verbose:
print(_lowerCAmelCase )
if args.score_path is not None:
json.dump(_lowerCAmelCase , open(args.score_path , """w""" ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
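# Usage sketch for summarization (paths are placeholders; flags match the
# parser above):
# python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_summaries.txt \
#     --reference_path $DATA_DIR/test.target --score_path $save_dir/rouge.json --task summarization --bs 16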
| 301 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class SCREAMING_SNAKE_CASE_ :
__magic_name__: int = MBartConfig
__magic_name__: str = {}
__magic_name__: Union[str, Any] = "gelu"
def __init__( self : List[str] , _A : Optional[int] , _A : List[Any]=13 , _A : List[Any]=7 , _A : Dict=True , _A : Tuple=False , _A : Optional[Any]=99 , _A : Dict=32 , _A : str=2 , _A : str=4 , _A : Tuple=37 , _A : Tuple=0.1 , _A : Union[str, Any]=0.1 , _A : Optional[int]=20 , _A : Dict=2 , _A : List[str]=1 , _A : Union[str, Any]=0 , ) -> List[Any]:
"""simple docstring"""
snake_case_ : str = parent
snake_case_ : List[str] = batch_size
snake_case_ : List[str] = seq_length
snake_case_ : Union[str, Any] = is_training
snake_case_ : Optional[int] = use_labels
snake_case_ : Dict = vocab_size
snake_case_ : Union[str, Any] = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Optional[Any] = num_attention_heads
snake_case_ : Union[str, Any] = intermediate_size
snake_case_ : Any = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : Optional[Any] = max_position_embeddings
snake_case_ : Optional[Any] = eos_token_id
snake_case_ : Tuple = pad_token_id
snake_case_ : int = bos_token_id
def UpperCAmelCase_ ( self : List[str] ) -> Tuple:
"""simple docstring"""
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
snake_case_ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case_ : Dict = tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case_ : Union[str, Any] = prepare_mbart_inputs_dict(_A , _A , _A )
return config, inputs_dict
def UpperCAmelCase_ ( self : Optional[Any] , _A : Optional[Any] , _A : int ) -> str:
"""simple docstring"""
snake_case_ : Dict = TFMBartModel(config=_A ).get_decoder()
snake_case_ : Any = inputs_dict['input_ids']
snake_case_ : List[Any] = input_ids[:1, :]
snake_case_ : Dict = inputs_dict['attention_mask'][:1, :]
snake_case_ : Tuple = inputs_dict['head_mask']
snake_case_ : List[Any] = 1
# first forward pass
snake_case_ : Any = model(_A , attention_mask=_A , head_mask=_A , use_cache=_A )
snake_case_ : str = outputs.to_tuple()
snake_case_ : int = past_key_values[1]
def SCREAMING_SNAKE_CASE__ ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
if attention_mask is None:
snake_case_ : Optional[int] = tf.cast(tf.math.not_equal(__a , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
snake_case_ : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
snake_case_ : str = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case_ : int = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
snake_case_ : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE_ ( snake_case_ , snake_case_ , unittest.TestCase ):
__magic_name__: Tuple = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
__magic_name__: int = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
__magic_name__: Union[str, Any] = (
{
"conversational": TFMBartForConditionalGeneration,
"feature-extraction": TFMBartModel,
"summarization": TFMBartForConditionalGeneration,
"text2text-generation": TFMBartForConditionalGeneration,
"translation": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
__magic_name__: Tuple = True
__magic_name__: Tuple = False
__magic_name__: Any = False
def UpperCAmelCase_ ( self : Any , _A : Union[str, Any] , _A : List[Any] , _A : str , _A : int , _A : Dict ) -> Union[str, Any]:
"""simple docstring"""
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = TFMBartModelTester(self )
snake_case_ : List[Any] = ConfigTester(self , config_class=_A )
def UpperCAmelCase_ ( self : Optional[Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_A )
@require_sentencepiece
@require_tokenizers
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
__magic_name__: Optional[int] = [
" UN Chief Says There Is No Military Solution in Syria",
]
__magic_name__: Union[str, Any] = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
]
__magic_name__: List[Any] = "facebook/mbart-large-en-ro"
@cached_property
def UpperCAmelCase_ ( self : str ) -> List[Any]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
"""simple docstring"""
snake_case_ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def UpperCAmelCase_ ( self : Optional[int] , **_A : str ) -> int:
"""simple docstring"""
snake_case_ : List[str] = self.translate_src_text(**_A )
self.assertListEqual(self.expected_text , _A )
def UpperCAmelCase_ ( self : Union[str, Any] , **_A : Dict ) -> int:
"""simple docstring"""
snake_case_ : Optional[Any] = self.tokenizer(self.src_text , **_A , return_tensors='tf' )
snake_case_ : int = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
snake_case_ : Any = self.tokenizer.batch_decode(_A , skip_special_tokens=_A )
return generated_words
@slow
def UpperCAmelCase_ ( self : str ) -> List[str]:
"""simple docstring"""
self._assert_generated_batch_equal_expected()
| 352 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_SCREAMING_SNAKE_CASE = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 88 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCAmelCase = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ['''MobileViTFeatureExtractor''']
_lowerCAmelCase = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 37 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {'''vocab_file''': '''spiece.model'''}
_lowerCAmelCase = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
_lowerCAmelCase = {
'''AI-Sweden/gpt-sw3-126m''': 2048,
'''AI-Sweden/gpt-sw3-350m''': 2048,
'''AI-Sweden/gpt-sw3-1.6b''': 2048,
'''AI-Sweden/gpt-sw3-6.7b''': 2048,
'''AI-Sweden/gpt-sw3-20b''': 2048,
}
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Dict = VOCAB_FILES_NAMES
__lowercase : str = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Optional[int] = ['''input_ids''', '''attention_mask''']
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> None:
lowerCAmelCase__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase__ : Dict = kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" you are testing the model, this can safely be ignored""" )
lowerCAmelCase__ : Tuple = """None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
lowerCAmelCase__ : Union[str, Any] = """<|endoftext|>""" if eos_token is None else eos_token
lowerCAmelCase__ : Dict = """<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
lowerCAmelCase__ : Any = unk_token if pad_token is None else pad_token
lowerCAmelCase__ : Dict = eos_token if bos_token is None else bos_token
else:
lowerCAmelCase__ : List[str] = """<pad>""" if pad_token is None else pad_token
lowerCAmelCase__ : Optional[int] = """<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=__UpperCAmelCase ,remove_space=__UpperCAmelCase ,keep_accents=__UpperCAmelCase ,bos_token=__UpperCAmelCase ,eos_token=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCAmelCase ,)
lowerCAmelCase__ : Optional[int] = do_lower_case
lowerCAmelCase__ : Dict = remove_space
lowerCAmelCase__ : Optional[Any] = keep_accents
lowerCAmelCase__ : int = vocab_file
lowerCAmelCase__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
# Used for whitespace normalization in input texts
        # fmt: off
lowerCAmelCase__ : int = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
lowerCAmelCase__ : List[str] = re.compile(
F"""[{''.join(map(__UpperCAmelCase ,list(range(0 ,9 ) ) + list(range(11 ,32 ) ) + list(range(127 ,160 ) ) + [160, 173, 8203] ) )}]""" )
def __getstate__( self ) -> Any:
lowerCAmelCase__ : int = self.__dict__.copy()
lowerCAmelCase__ : Optional[int] = None
return state
def __setstate__( self ,__UpperCAmelCase ) -> List[str]:
lowerCAmelCase__ : List[str] = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
lowerCAmelCase__ : Tuple = {}
lowerCAmelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def UpperCAmelCase_ ( self ) -> int:
return len(self.sp_model )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
lowerCAmelCase__ : Tuple = self.non_printing_characters_re.sub("""""" ,__UpperCAmelCase )
# Normalize whitespaces
lowerCAmelCase__ : List[Any] = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
lowerCAmelCase__ : List[Any] = unicodedata.normalize("""NFC""" ,__UpperCAmelCase )
return text
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
lowerCAmelCase__ : List[Any] = self.preprocess_text(__UpperCAmelCase )
return self.sp_model.encode(__UpperCAmelCase ,out_type=__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> int:
return self.sp_model.PieceToId(__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
return self.sp_model.IdToPiece(__UpperCAmelCase )
@staticmethod
def UpperCAmelCase_ ( __UpperCAmelCase ) -> str:
return out_string
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
lowerCAmelCase__ : int = []
lowerCAmelCase__ : Optional[int] = """"""
lowerCAmelCase__ : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : Optional[Any] = []
else:
current_sub_tokens.append(__UpperCAmelCase )
lowerCAmelCase__ : Any = False
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string
def UpperCAmelCase_ ( self ) -> Dict[str, int]:
lowerCAmelCase__ : Optional[int] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ : Optional[int] = os.path.join(
__UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase ,"""wb""" ) as fi:
lowerCAmelCase__ : str = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : Tuple = self.preprocess_text(__UpperCAmelCase )
lowerCAmelCase__ : int = self.sp_model.encode(__UpperCAmelCase )
else:
lowerCAmelCase__ : int = [self.preprocess_text(__UpperCAmelCase ) for t in text]
lowerCAmelCase__ : Any = self.sp_model.encode(__UpperCAmelCase )
if return_tensors is True or return_tensors == "pt":
lowerCAmelCase__ : Tuple = torch.tensor(__UpperCAmelCase )
return token_ids
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
return self.sp_model.decode(__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[int]:
lowerCAmelCase__ : List[Any] = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
lowerCAmelCase__ : Any = (
F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(__UpperCAmelCase ) + F"""{self.bos_token}Bot:"""
)
return self.encode(text=__UpperCAmelCase )
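    # Hypothetical prompt produced by the builder above for a two-turn
    # conversation, assuming eos_token="<|endoftext|>" and bos_token="<s>":
    #   "<|endoftext|><s>User: Hej!<s>Bot: Hej hej!<s>Bot:"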
| 37 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 169 |
from __future__ import annotations
from statistics import mean
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> list[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [0] * no_of_processes
SCREAMING_SNAKE_CASE__ = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(UpperCamelCase_ ):
SCREAMING_SNAKE_CASE__ = burst_time[i]
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
    # While not all processes are completed:
    # a process whose arrival time has passed
    # and which still has remaining execution time is put into ready_process.
    # The shortest process in ready_process (target_process) is executed.
while completed != no_of_processes:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = -1
for i in range(UpperCamelCase_ ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
SCREAMING_SNAKE_CASE__ = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
SCREAMING_SNAKE_CASE__ = i
total_time += burst_time[target_process]
completed += 1
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> list[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [0] * no_of_processes
for i in range(UpperCamelCase_ ):
SCREAMING_SNAKE_CASE__ = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
__snake_case = 4
__snake_case = [2, 5, 3, 7]
__snake_case = [0, 0, 0, 0]
__snake_case = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__snake_case = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 169 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
def __init__( self : str ) -> Optional[int]:
UpperCAmelCase : Any = []
def A ( self : Union[str, Any] , __snake_case : str , __snake_case : Any , __snake_case : Dict , **__snake_case : List[Any] ) -> int:
self.events.append('''on_init_end''' )
def A ( self : Optional[int] , __snake_case : List[str] , __snake_case : str , __snake_case : List[Any] , **__snake_case : List[Any] ) -> Dict:
self.events.append('''on_train_begin''' )
def A ( self : Any , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Any , **__snake_case : List[Any] ) -> int:
self.events.append('''on_train_end''' )
def A ( self : int , __snake_case : Dict , __snake_case : Tuple , __snake_case : Optional[int] , **__snake_case : List[Any] ) -> int:
self.events.append('''on_epoch_begin''' )
def A ( self : str , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : List[Any] , **__snake_case : str ) -> Optional[int]:
self.events.append('''on_epoch_end''' )
def A ( self : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Any , **__snake_case : Any ) -> Optional[int]:
self.events.append('''on_step_begin''' )
def A ( self : Any , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : Optional[Any] , **__snake_case : Tuple ) -> Optional[Any]:
self.events.append('''on_step_end''' )
def A ( self : Optional[Any] , __snake_case : Any , __snake_case : List[str] , __snake_case : List[str] , **__snake_case : Any ) -> Dict:
self.events.append('''on_evaluate''' )
def A ( self : Dict , __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : int , **__snake_case : str ) -> Optional[Any]:
self.events.append('''on_predict''' )
def A ( self : Dict , __snake_case : Any , __snake_case : int , __snake_case : Dict , **__snake_case : List[Any] ) -> Union[str, Any]:
self.events.append('''on_save''' )
def A ( self : str , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Any , **__snake_case : Tuple ) -> List[Any]:
self.events.append('''on_log''' )
def A ( self : List[str] , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Tuple , **__snake_case : Optional[int] ) -> int:
self.events.append('''on_prediction_step''' )
@require_torch
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
def A ( self : List[Any] ) -> Optional[int]:
UpperCAmelCase : Dict = tempfile.mkdtemp()
def A ( self : Tuple ) -> Any:
shutil.rmtree(self.output_dir )
def A ( self : Tuple , __snake_case : Optional[int]=0 , __snake_case : Any=0 , __snake_case : List[Any]=64 , __snake_case : int=64 , __snake_case : Union[str, Any]=None , __snake_case : Dict=False , **__snake_case : str ) -> List[Any]:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
UpperCAmelCase : Dict = RegressionDataset(length=__snake_case )
UpperCAmelCase : List[Any] = RegressionDataset(length=__snake_case )
UpperCAmelCase : Dict = RegressionModelConfig(a=__snake_case , b=__snake_case )
UpperCAmelCase : Tuple = RegressionPreTrainedModel(__snake_case )
UpperCAmelCase : Tuple = TrainingArguments(self.output_dir , disable_tqdm=__snake_case , report_to=[] , **__snake_case )
return Trainer(
__snake_case , __snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , callbacks=__snake_case , )
def A ( self : Any , __snake_case : List[Any] , __snake_case : List[Any] ) -> Union[str, Any]:
self.assertEqual(len(__snake_case ) , len(__snake_case ) )
# Order doesn't matter
UpperCAmelCase : Dict = sorted(__snake_case , key=lambda __snake_case : cb.__name__ if isinstance(__snake_case , __snake_case ) else cb.__class__.__name__ )
UpperCAmelCase : str = sorted(__snake_case , key=lambda __snake_case : cb.__name__ if isinstance(__snake_case , __snake_case ) else cb.__class__.__name__ )
for cba, cba in zip(__snake_case , __snake_case ):
if isinstance(__snake_case , __snake_case ) and isinstance(__snake_case , __snake_case ):
self.assertEqual(__snake_case , __snake_case )
elif isinstance(__snake_case , __snake_case ) and not isinstance(__snake_case , __snake_case ):
self.assertEqual(__snake_case , cba.__class__ )
elif not isinstance(__snake_case , __snake_case ) and isinstance(__snake_case , __snake_case ):
self.assertEqual(cba.__class__ , __snake_case )
else:
self.assertEqual(__snake_case , __snake_case )
def A ( self : Dict , __snake_case : List[str] ) -> int:
UpperCAmelCase : Any = ['''on_init_end''', '''on_train_begin''']
UpperCAmelCase : Optional[Any] = 0
UpperCAmelCase : Optional[int] = len(trainer.get_eval_dataloader() )
UpperCAmelCase : str = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(__snake_case ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
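    # Illustrative trace of the helper above: a 1-epoch run with two optimization
    # steps and no logging/eval/save triggered inside the loop yields
    #   on_init_end, on_train_begin, on_epoch_begin,
    #   on_step_begin, on_step_end, on_step_begin, on_step_end,
    #   on_epoch_end, on_log, on_train_end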
def A ( self : Any ) -> List[Any]:
UpperCAmelCase : Dict = self.get_trainer()
UpperCAmelCase : Optional[Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __snake_case )
# Callbacks passed at init are added to the default callbacks
UpperCAmelCase : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(__snake_case )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __snake_case )
        # TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
UpperCAmelCase : List[str] = self.get_trainer(disable_tqdm=__snake_case )
UpperCAmelCase : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __snake_case )
def A ( self : int ) -> Dict:
UpperCAmelCase : Any = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
UpperCAmelCase : Optional[Any] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(__snake_case )
expected_callbacks.remove(__snake_case )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __snake_case )
UpperCAmelCase : Dict = self.get_trainer()
UpperCAmelCase : Optional[Any] = trainer.pop_callback(__snake_case )
self.assertEqual(cb.__class__ , __snake_case )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __snake_case )
trainer.add_callback(__snake_case )
expected_callbacks.insert(0 , __snake_case )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __snake_case )
# We can also add, pop, or remove by instance
UpperCAmelCase : Any = self.get_trainer()
UpperCAmelCase : Union[str, Any] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(__snake_case )
expected_callbacks.remove(__snake_case )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __snake_case )
UpperCAmelCase : Optional[Any] = self.get_trainer()
UpperCAmelCase : Dict = trainer.callback_handler.callbacks[0]
UpperCAmelCase : str = trainer.pop_callback(__snake_case )
self.assertEqual(__snake_case , __snake_case )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __snake_case )
trainer.add_callback(__snake_case )
expected_callbacks.insert(0 , __snake_case )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __snake_case )
def A ( self : int ) -> Union[str, Any]:
import warnings
        # XXX: for now, ignore scatter_gather warnings in this test since they're not relevant to what's being tested
warnings.simplefilter(action='''ignore''' , category=__snake_case )
UpperCAmelCase : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
UpperCAmelCase : List[str] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__snake_case , self.get_expected_events(__snake_case ) )
# Independent log/save/eval
UpperCAmelCase : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
UpperCAmelCase : List[Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__snake_case , self.get_expected_events(__snake_case ) )
UpperCAmelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
UpperCAmelCase : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__snake_case , self.get_expected_events(__snake_case ) )
UpperCAmelCase : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='''steps''' )
trainer.train()
UpperCAmelCase : List[str] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__snake_case , self.get_expected_events(__snake_case ) )
UpperCAmelCase : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='''epoch''' )
trainer.train()
UpperCAmelCase : List[str] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__snake_case , self.get_expected_events(__snake_case ) )
# A bit of everything
UpperCAmelCase : List[Any] = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='''steps''' , )
trainer.train()
UpperCAmelCase : Optional[Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__snake_case , self.get_expected_events(__snake_case ) )
        # a warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
UpperCAmelCase : Optional[Any] = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(__snake_case ) in warn_mock.call_args[0][0]
| 23 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE( A__ , A__ , A__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = AltDiffusionPipeline
lowerCamelCase__ = TEXT_TO_IMAGE_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
def A ( self : Dict ) -> int:
torch.manual_seed(0 )
UpperCAmelCase : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
UpperCAmelCase : Dict = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=__snake_case , set_alpha_to_one=__snake_case , )
torch.manual_seed(0 )
UpperCAmelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
UpperCAmelCase : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
UpperCAmelCase : List[Any] = CLIPTextModel(__snake_case )
UpperCAmelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
UpperCAmelCase : Optional[int] = 77
UpperCAmelCase : Optional[int] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def A ( self : Optional[Any] , __snake_case : Dict , __snake_case : List[str]=0 ) -> Union[str, Any]:
if str(__snake_case ).startswith('''mps''' ):
UpperCAmelCase : str = torch.manual_seed(__snake_case )
else:
UpperCAmelCase : Tuple = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
UpperCAmelCase : Dict = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : Union[str, Any] ) -> List[str]:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def A ( self : Tuple ) -> List[str]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def A ( self : Tuple ) -> Optional[int]:
UpperCAmelCase : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase : Any = self.get_dummy_components()
torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
UpperCAmelCase : List[str] = RobertaSeriesModelWithTransformation(__snake_case )
UpperCAmelCase : str = text_encoder
UpperCAmelCase : Optional[int] = AltDiffusionPipeline(**__snake_case )
UpperCAmelCase : str = alt_pipe.to(__snake_case )
alt_pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase : Optional[int] = self.get_dummy_inputs(__snake_case )
UpperCAmelCase : Optional[int] = '''A photo of an astronaut'''
UpperCAmelCase : List[Any] = alt_pipe(**__snake_case )
UpperCAmelCase : Optional[Any] = output.images
UpperCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : List[str] = np.array(
[0.5_74_81_62, 0.60_44_71_45, 0.48_82_12_17, 0.50_10_06_36, 0.5_43_11_85, 0.45_76_36_83, 0.49_65_76_96, 0.48_13_27_33, 0.47_57_30_93] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self : int ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase : int = self.get_dummy_components()
UpperCAmelCase : int = PNDMScheduler(skip_prk_steps=__snake_case )
torch.manual_seed(0 )
UpperCAmelCase : int = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
UpperCAmelCase : Union[str, Any] = RobertaSeriesModelWithTransformation(__snake_case )
UpperCAmelCase : Union[str, Any] = text_encoder
UpperCAmelCase : Optional[int] = AltDiffusionPipeline(**__snake_case )
UpperCAmelCase : Dict = alt_pipe.to(__snake_case )
alt_pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase : int = self.get_dummy_inputs(__snake_case )
UpperCAmelCase : Optional[int] = alt_pipe(**__snake_case )
UpperCAmelCase : Optional[int] = output.images
UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : Optional[int] = np.array(
[0.51_60_50_93, 0.5_70_72_41, 0.47_36_55_07, 0.50_57_88_86, 0.5_63_38_77, 0.4_64_25_03, 0.5_18_20_81, 0.48_76_34_84, 0.49_08_42_37] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
def A ( self : str ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : List[Any] ) -> Any:
        # make sure that the PNDM scheduler skips the PRK steps here
UpperCAmelCase : List[Any] = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=__snake_case )
UpperCAmelCase : Tuple = alt_pipe.to(__snake_case )
alt_pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase : List[Any] = '''A painting of a squirrel eating a burger'''
UpperCAmelCase : Any = torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = alt_pipe([prompt] , generator=__snake_case , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
UpperCAmelCase : Dict = output.images
UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : List[str] = np.array([0.10_10, 0.08_00, 0.07_94, 0.08_85, 0.08_43, 0.07_62, 0.07_69, 0.07_29, 0.05_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self : Tuple ) -> int:
UpperCAmelCase : int = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
UpperCAmelCase : Tuple = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=__snake_case , safety_checker=__snake_case )
UpperCAmelCase : Dict = alt_pipe.to(__snake_case )
alt_pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase : Tuple = '''A painting of a squirrel eating a burger'''
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : List[Any] = alt_pipe([prompt] , generator=__snake_case , num_inference_steps=2 , output_type='''numpy''' )
UpperCAmelCase : Dict = output.images
UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : Union[str, Any] = np.array([0.40_19, 0.40_52, 0.38_10, 0.41_19, 0.39_16, 0.39_82, 0.46_51, 0.41_95, 0.53_23] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 23 | 1 |
'''simple docstring'''
from manim import *
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def lowerCAmelCase__ ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = Rectangle(height=0.5 , width=0.5 )
_UpperCAmelCase : Dict = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_UpperCAmelCase : Dict = [mem.copy() for i in range(6 )]
_UpperCAmelCase : int = [mem.copy() for i in range(6 )]
_UpperCAmelCase : List[Any] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : List[str] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : str = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : str = Text("CPU" , font_size=24 )
_UpperCAmelCase : Optional[int] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase__ )
_UpperCAmelCase : str = [mem.copy() for i in range(4 )]
_UpperCAmelCase : Optional[int] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Dict = Text("GPU" , font_size=24 )
_UpperCAmelCase : List[Any] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Tuple = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[int] = Text("Model" , font_size=24 )
_UpperCAmelCase : int = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = []
for i, rect in enumerate(lowerCamelCase__ ):
rect.set_stroke(lowerCamelCase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_UpperCAmelCase : Optional[int] = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=lowerCamelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowerCamelCase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowerCamelCase__ , buff=0.0 )
self.add(lowerCamelCase__ )
cpu_targs.append(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Tuple = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[int] = Text("Loaded Checkpoint" , font_size=24 )
_UpperCAmelCase : Optional[int] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , aligned_edge=lowerCamelCase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_UpperCAmelCase : Tuple = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCAmelCase : Optional[Any] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowerCamelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_UpperCAmelCase : Union[str, Any] = MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__ ) , Write(lowerCamelCase__ ) )
self.play(Write(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) )
_UpperCAmelCase : Any = []
_UpperCAmelCase : str = []
for i, rect in enumerate(lowerCamelCase__ ):
_UpperCAmelCase : Dict = fill.copy().set_fill(lowerCamelCase__ , opacity=0.7 )
target.move_to(lowerCamelCase__ )
first_animations.append(GrowFromCenter(lowerCamelCase__ , run_time=1 ) )
_UpperCAmelCase : Dict = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowerCamelCase__ , run_time=1.5 ) )
self.play(*lowerCamelCase__ )
self.play(*lowerCamelCase__ )
self.wait()
| 322 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar('T')
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : Union[str, Any] , lowerCamelCase__ : T ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Dict = data
_UpperCAmelCase : Node[T] | None = None
def __str__( self : Any ) ->str:
'''simple docstring'''
return F"""{self.data}"""
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : Tuple ) ->None:
'''simple docstring'''
_UpperCAmelCase : Node[T] | None = None
def __iter__( self : List[str] ) ->Iterator[T]:
'''simple docstring'''
_UpperCAmelCase : Any = self.top
while node:
yield node.data
_UpperCAmelCase : Dict = node.next
def __str__( self : Dict ) ->str:
'''simple docstring'''
return "->".join([str(lowerCamelCase__ ) for item in self] )
def __len__( self : Optional[int] ) ->int:
'''simple docstring'''
return len(tuple(iter(self ) ) )
def lowerCAmelCase__ ( self : List[Any] ) ->bool:
'''simple docstring'''
return self.top is None
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : T ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[Any] = Node(lowerCamelCase__ )
if not self.is_empty():
_UpperCAmelCase : Tuple = self.top
_UpperCAmelCase : List[str] = node
def lowerCAmelCase__ ( self : Union[str, Any] ) ->T:
'''simple docstring'''
if self.is_empty():
raise IndexError("pop from empty stack" )
assert isinstance(self.top , lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = self.top
_UpperCAmelCase : Optional[Any] = self.top.next
return pop_node.data
def lowerCAmelCase__ ( self : Union[str, Any] ) ->T:
'''simple docstring'''
if self.is_empty():
raise IndexError("peek from empty stack" )
assert self.top is not None
return self.top.data
def lowerCAmelCase__ ( self : List[Any] ) ->None:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = None
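# Illustrative LIFO behavior (readable method names are assumed for clarity;
# the class above uses placeholder identifiers from this dump):
#   stack.push(1); stack.push(2)
#   stack.pop()   # -> 2  (last in, first out)
#   stack.peek()  # -> 1  (inspect without removing)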
if __name__ == "__main__":
from doctest import testmod
testmod()
| 322 | 1 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
__UpperCAmelCase = 'docs/source/en/_toctree.yml'
def _snake_case ( lowercase__ : Any ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Any = defaultdict(lowercase__ )
lowerCAmelCase_ :Union[str, Any] = []
lowerCAmelCase_ :int = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
else:
new_doc_list.append(lowercase__ )
lowerCAmelCase_ :int = new_doc_list
lowerCAmelCase_ :str = [key for key, value in counts.items() if value > 1]
lowerCAmelCase_ :Tuple = []
for duplicate_key in duplicates:
lowerCAmelCase_ :Any = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
if len(lowercase__ ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
    # Add non-duplicate keys
    new_doc.extend([doc for doc in doc_list if """local""" not in doc or counts[doc["""local"""]] == 1] )
lowerCAmelCase_ :int = sorted(lowercase__ , key=lambda lowercase__ : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(lowercase__ ) > 1:
raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" )
overview_doc.extend(lowercase__ )
# Sort
return overview_doc
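# Illustrative behavior of the helper above (assumed input shape): given
#   [{"local": "overview", "title": "Overview"},
#    {"local": "bert", "title": "BERT"},
#    {"local": "bert", "title": "BERT"}]
# the duplicate "bert" entries collapse into one, the remaining entries are
# sorted by title, and the "overview" entry is placed first.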
def _snake_case ( lowercase__ : Optional[Any]=False ) -> str:
'''simple docstring'''
with open(lowercase__ , encoding="""utf-8""" ) as f:
lowerCAmelCase_ :int = yaml.safe_load(f.read() )
# Get to the API doc
lowerCAmelCase_ :List[str] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowerCAmelCase_ :List[str] = content[api_idx]["""sections"""]
# Then to the model doc
lowerCAmelCase_ :int = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
lowerCAmelCase_ :Dict = api_doc[scheduler_idx]["""sections"""]
lowerCAmelCase_ :Optional[Any] = clean_doc_toc(lowercase__ )
lowerCAmelCase_ :str = False
if new_scheduler_doc != scheduler_doc:
lowerCAmelCase_ :Optional[int] = True
if overwrite:
lowerCAmelCase_ :Tuple = new_scheduler_doc
if diff:
if overwrite:
lowerCAmelCase_ :str = api_doc
with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(lowercase__ , allow_unicode=lowercase__ ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
def _snake_case ( lowercase__ : Any=False ) -> int:
'''simple docstring'''
with open(lowercase__ , encoding="""utf-8""" ) as f:
lowerCAmelCase_ :int = yaml.safe_load(f.read() )
# Get to the API doc
lowerCAmelCase_ :Optional[int] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowerCAmelCase_ :Optional[int] = content[api_idx]["""sections"""]
# Then to the model doc
lowerCAmelCase_ :List[Any] = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
lowerCAmelCase_ :Optional[int] = False
lowerCAmelCase_ :Any = api_doc[pipeline_idx]["""sections"""]
lowerCAmelCase_ :str = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
lowerCAmelCase_ :int = pipeline_doc["""section"""]
lowerCAmelCase_ :Tuple = clean_doc_toc(lowercase__ )
if overwrite:
lowerCAmelCase_ :List[str] = new_sub_pipeline_doc
new_pipeline_docs.append(lowercase__ )
# sort overall pipeline doc
lowerCAmelCase_ :Union[str, Any] = clean_doc_toc(lowercase__ )
if new_pipeline_docs != pipeline_docs:
lowerCAmelCase_ :Tuple = True
if overwrite:
lowerCAmelCase_ :Optional[Any] = new_pipeline_docs
if diff:
if overwrite:
lowerCAmelCase_ :Tuple = api_doc
with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(lowercase__ , allow_unicode=lowercase__ ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__UpperCAmelCase = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 84 | '''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
def __init__( self : Any , A : Optional[int]=None , A : Tuple=None , *A : Tuple , **A : List[str] ):
super().__init__(*A , **A )
if config is None:
assert isinstance(self.model , A ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F""" {self.model.__class__}"""
)
_UpperCAmelCase : str = self.model.config
else:
_UpperCAmelCase : List[str] = config
_UpperCAmelCase : List[Any] = data_args
_UpperCAmelCase : str = self.config.tgt_vocab_size if isinstance(self.config , A ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
" padding.." )
if self.args.label_smoothing == 0:
_UpperCAmelCase : Optional[Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_UpperCAmelCase : Dict = label_smoothed_nll_loss
def _A ( self : Tuple , A : int ):
if self.optimizer is None:
_UpperCAmelCase : Tuple = ["bias", "LayerNorm.weight"]
_UpperCAmelCase : str = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
_UpperCAmelCase : int = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_UpperCAmelCase : List[str] = Adafactor
_UpperCAmelCase : List[Any] = {"scale_parameter": False, "relative_step": False}
else:
_UpperCAmelCase : List[str] = AdamW
_UpperCAmelCase : List[str] = {
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
_UpperCAmelCase : List[Any] = self.args.learning_rate
if self.sharded_ddp:
_UpperCAmelCase : List[Any] = OSS(
params=A , optim=A , **A , )
else:
_UpperCAmelCase : Union[str, Any] = optimizer_cls(A , **A )
if self.lr_scheduler is None:
_UpperCAmelCase : List[str] = self._get_lr_scheduler(A )
else: # ignoring --lr_scheduler
logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
def _A ( self : List[str] , A : Optional[int] ):
_UpperCAmelCase : List[str] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_UpperCAmelCase : Optional[Any] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_UpperCAmelCase : str = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_UpperCAmelCase : str = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=A )
return scheduler
def _A ( self : Tuple ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def _A ( self : Any , A : Union[str, Any] , A : Union[str, Any] , A : List[Any] ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_UpperCAmelCase : List[str] = model(**A , use_cache=A )[0]
_UpperCAmelCase : int = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
                # compute the usual loss via the model
_UpperCAmelCase , _UpperCAmelCase : Any = model(**A , labels=A , use_cache=A )[:2]
else:
# compute label smoothed loss
_UpperCAmelCase : Optional[int] = model(**A , use_cache=A )[0]
_UpperCAmelCase : List[str] = torch.nn.functional.log_softmax(A , dim=-1 )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.loss_fn(A , A , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def _A ( self : List[str] , A : Optional[int] , A : Optional[int] ):
_UpperCAmelCase : Union[str, Any] = inputs.pop("labels" )
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self._compute_loss(A , A , A )
return loss
def _A ( self : List[str] , A : nn.Module , A : Dict[str, Union[torch.Tensor, Any]] , A : bool , A : Optional[List[str]] = None , ):
_UpperCAmelCase : List[str] = self._prepare_inputs(A )
_UpperCAmelCase : Dict = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_UpperCAmelCase : Dict = self.model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **A , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase : int = self._pad_tensors_to_max_len(A , gen_kwargs["max_length"] )
_UpperCAmelCase : Any = inputs.pop("labels" )
with torch.no_grad():
# compute loss on predict data
_UpperCAmelCase , _UpperCAmelCase : str = self._compute_loss(A , A , A )
_UpperCAmelCase : List[str] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_UpperCAmelCase : str = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase : Optional[Any] = self._pad_tensors_to_max_len(A , gen_kwargs["max_length"] )
return (loss, logits, labels)
def _A ( self : Dict , A : int , A : List[str] ):
        # If the PAD token is not defined, at least the EOS token has to be defined
_UpperCAmelCase : Union[str, Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
F""" padded to `max_length`={max_length}""" )
_UpperCAmelCase : Tuple = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_UpperCAmelCase : Tuple = tensor
return padded_tensor
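# The `label_smoothed_nll_loss` helper imported dynamically above is not shown
# in this snippet. A common implementation is sketched below (following the
# fairseq convention; the exact signature in the local `utils.py` may differ):
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
    # lprobs: (N, vocab) log-probabilities; target: (N,) gold token ids
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)  # loss on the gold class
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)  # loss against a uniform prior
    pad_mask = target.eq(ignore_index)
    nll_loss.masked_fill_(pad_mask, 0.0)
    smooth_loss.masked_fill_(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss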
| 31 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str]=8 ):
'''simple docstring'''
UpperCAmelCase__ = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase__ = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
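# Illustrative: with the default scale_factor=8, height=768 gives 768 // 64 = 12
# and returns 12 * 8 = 96 (i.e. 768 / 8); a non-divisible height=770 rounds up,
# 770 // 64 = 12 with remainder -> 13, returning 13 * 8 = 104.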
class lowerCAmelCase_ ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self : List[str] , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : DDPMScheduler , _UpperCAmelCase : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , movq=_UpperCAmelCase , )
UpperCAmelCase__ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
if latents is None:
UpperCAmelCase__ = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=_UpperCAmelCase , dtype=_UpperCAmelCase )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
UpperCAmelCase__ = latents.to(_UpperCAmelCase )
UpperCAmelCase__ = latents * scheduler.init_noise_sigma
return latents
def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : int=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
UpperCAmelCase__ = torch.device(f'''cuda:{gpu_id}''' )
UpperCAmelCase__ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : Optional[Any]=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
UpperCAmelCase__ = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=_UpperCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase__ = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase__ = cpu_offload_with_hook(_UpperCAmelCase , _UpperCAmelCase , prev_module_hook=_UpperCAmelCase )
# We'll offload the last model manually.
UpperCAmelCase__ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_UpperCAmelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_UpperCAmelCase )
def __call__( self : str , _UpperCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , _UpperCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , _UpperCAmelCase : int = 5_12 , _UpperCAmelCase : int = 5_12 , _UpperCAmelCase : int = 1_00 , _UpperCAmelCase : float = 4.0 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , ):
"""simple docstring"""
UpperCAmelCase__ = self._execution_device
UpperCAmelCase__ = guidance_scale > 1.0
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase__ = torch.cat(_UpperCAmelCase , dim=0 )
UpperCAmelCase__ = image_embeds.shape[0] * num_images_per_prompt
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase__ = torch.cat(_UpperCAmelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase__ = image_embeds.repeat_interleave(_UpperCAmelCase , dim=0 )
UpperCAmelCase__ = negative_image_embeds.repeat_interleave(_UpperCAmelCase , dim=0 )
UpperCAmelCase__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCAmelCase )
self.scheduler.set_timesteps(_UpperCAmelCase , device=_UpperCAmelCase )
UpperCAmelCase__ = self.scheduler.timesteps
UpperCAmelCase__ = self.unet.config.in_channels
UpperCAmelCase__ = downscale_height_and_width(_UpperCAmelCase , _UpperCAmelCase , self.movq_scale_factor )
        # create the initial latents
UpperCAmelCase__ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(_UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase__ = {'image_embeds': image_embeds}
UpperCAmelCase__ = self.unet(
sample=_UpperCAmelCase , timestep=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , added_cond_kwargs=_UpperCAmelCase , return_dict=_UpperCAmelCase , )[0]
if do_classifier_free_guidance:
UpperCAmelCase__ = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase__ = noise_pred.chunk(2 )
UpperCAmelCase__ = variance_pred.chunk(2 )
UpperCAmelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase__ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase__ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase__ = self.scheduler.step(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase , )[0]
# post-processing
UpperCAmelCase__ = self.movq.decode(_UpperCAmelCase , force_not_quantize=_UpperCAmelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
UpperCAmelCase__ = image * 0.5 + 0.5
UpperCAmelCase__ = image.clamp(0 , 1 )
UpperCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase__ = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCAmelCase )
| 351 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 61 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Any = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
_lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
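# Sketch of what the lazy module buys (illustrative): importing this package is
# cheap because the submodules above are only loaded on first attribute access;
# accessing e.g. `MCTCTConfig` on the package is what triggers the real import
# of `configuration_mctct`.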
| 169 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowerCAmelCase : List[str] = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Dict = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__lowerCAmelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 88 | 0 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowercase =False
class __magic_name__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] =VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa)
pipe.to(snake_case)
pipe.set_progress_bar_config(disable=snake_case)
_UpperCAmelCase : Union[str, Any] =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
_UpperCAmelCase : Dict =torch.manual_seed(0)
_UpperCAmelCase : str =pipe.dual_guided(
prompt='first prompt' , image=snake_case , text_to_image_strength=0.75 , generator=snake_case , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(snake_case)
_UpperCAmelCase : Union[str, Any] =VersatileDiffusionPipeline.from_pretrained(snake_case , torch_dtype=torch.floataa)
pipe.to(snake_case)
pipe.set_progress_bar_config(disable=snake_case)
_UpperCAmelCase : List[Any] =generator.manual_seed(0)
_UpperCAmelCase : Dict =pipe.dual_guided(
prompt='first prompt' , image=snake_case , text_to_image_strength=0.75 , generator=snake_case , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image).sum() < 1E-5, "Models don't have the same forward pass"
def lowerCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] =VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa)
pipe.to(snake_case)
pipe.set_progress_bar_config(disable=snake_case)
_UpperCAmelCase : List[str] ='cyberpunk 2077'
_UpperCAmelCase : Dict =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
_UpperCAmelCase : Any =torch.manual_seed(0)
_UpperCAmelCase : Optional[int] =pipe.dual_guided(
prompt=snake_case , image=snake_case , text_to_image_strength=0.75 , generator=snake_case , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images
_UpperCAmelCase : int =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[str] =np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
_UpperCAmelCase : Dict ='A painting of a squirrel eating a burger '
_UpperCAmelCase : Union[str, Any] =torch.manual_seed(0)
_UpperCAmelCase : str =pipe.text_to_image(
prompt=snake_case , generator=snake_case , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy').images
_UpperCAmelCase : Any =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : str =np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
_UpperCAmelCase : int =pipe.image_variation(snake_case , generator=snake_case , output_type='numpy').images
_UpperCAmelCase : Optional[Any] =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[str] =np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
| 242 |
'''simple docstring'''
import numpy as np
class __magic_name__ :
def __init__( self) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] =(0, 0)
_UpperCAmelCase : Any =None
_UpperCAmelCase : List[str] =0
_UpperCAmelCase : Dict =0
_UpperCAmelCase : Dict =0
def __eq__( self , snake_case) -> Dict:
'''simple docstring'''
return self.position == cell.position
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
print(self.position)
class __magic_name__ :
def __init__( self , snake_case=(5, 5)) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[str] =np.zeros(snake_case)
_UpperCAmelCase : Optional[Any] =world_size[0]
_UpperCAmelCase : Any =world_size[1]
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
print(self.w)
def lowerCAmelCase ( self , snake_case) -> Any:
'''simple docstring'''
_UpperCAmelCase : str =[
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
_UpperCAmelCase : int =cell.position[0]
_UpperCAmelCase : Optional[Any] =cell.position[1]
_UpperCAmelCase : Dict =[]
for n in neughbour_cord:
_UpperCAmelCase : List[str] =current_x + n[0]
_UpperCAmelCase : int =current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
_UpperCAmelCase : List[Any] =Cell()
_UpperCAmelCase : Dict =(x, y)
_UpperCAmelCase : Any =cell
neighbours.append(snake_case)
return neighbours
def lowerCamelCase__ ( __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : int ):
'''simple docstring'''
_UpperCAmelCase : List[Any] =[]
_UpperCAmelCase : Union[str, Any] =[]
_open.append(__lowerCamelCase )
while _open:
_UpperCAmelCase : List[str] =np.argmin([n.f for n in _open] )
_UpperCAmelCase : Optional[int] =_open[min_f]
        _closed.append(_open.pop(min_f ) )  # move the lowest-f node from open to closed
if current == goal:
break
        for n in world.get_neigbours(current ):
for c in _closed:
if c == n:
continue
_UpperCAmelCase : Tuple =current.g + 1
            _UpperCAmelCase , _UpperCAmelCase : Any =n.position  # intended to bind (xa, ya)
            _UpperCAmelCase , _UpperCAmelCase : List[str] =goal.position  # intended to bind (xb, yb)
            # heuristic h: squared Euclidean distance from the neighbour to the goal
            _UpperCAmelCase : Optional[int] =(ya - yb) ** 2 + (xa - xb) ** 2
_UpperCAmelCase : Optional[Any] =n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
            _open.append(n )
_UpperCAmelCase : str =[]
while current.parent is not None:
path.append(current.position )
_UpperCAmelCase : Any =current.parent
path.append(current.position )
return path[::-1]
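# A compact, runnable reference sketch of the same grid A* (illustrative
# only; the obfuscated class and function names above shadow each other, so
# this version is standalone, uses a heap-based open list, and keeps the
# squared-distance heuristic and unit step costs of the routine above):
def _astar_demo(world_size=(5, 5), start=(0, 0), goal=(4, 4)):
    import heapq

    def h(p):
        # squared Euclidean distance to the goal
        return (p[0] - goal[0]) ** 2 + (p[1] - goal[1]) ** 2

    open_heap = [(h(start), 0, start)]
    parents = {start: None}
    g_cost = {start: 0}
    while open_heap:
        _, g, pos = heapq.heappop(open_heap)
        if pos == goal:
            path = []
            while pos is not None:
                path.append(pos)
                pos = parents[pos]
            return path[::-1]
        for dx in (-1, 0, 1):
            for dy in (-1, 0, 1):
                nxt = (pos[0] + dx, pos[1] + dy)
                if (dx, dy) == (0, 0) or not (
                    0 <= nxt[0] < world_size[0] and 0 <= nxt[1] < world_size[1]
                ):
                    continue
                if g + 1 < g_cost.get(nxt, float("inf")):
                    g_cost[nxt] = g + 1
                    parents[nxt] = pos
                    heapq.heappush(open_heap, (g + 1 + h(nxt), g + 1, nxt))
    return []


# _astar_demo() -> [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]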
if __name__ == "__main__":
lowercase =Gridworld()
# Start position and goal
lowercase =Cell()
lowercase =(0, 0)
lowercase =Cell()
lowercase =(4, 4)
print(F"""path from {start.position} to {goal.position}""")
lowercase =astar(world, start, goal)
# Just for visual reasons.
for i in s:
lowercase =1
print(world.w)
| 242 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {
"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class _UpperCamelCase ( lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = """dinat"""
UpperCAmelCase_ = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :Optional[int] , lowerCamelCase :Union[str, Any]=4 , lowerCamelCase :List[Any]=3 , lowerCamelCase :Dict=64 , lowerCamelCase :int=[3, 4, 6, 5] , lowerCamelCase :Union[str, Any]=[2, 4, 8, 16] , lowerCamelCase :Optional[Any]=7 , lowerCamelCase :str=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , lowerCamelCase :Union[str, Any]=3.0 , lowerCamelCase :List[str]=True , lowerCamelCase :int=0.0 , lowerCamelCase :Tuple=0.0 , lowerCamelCase :Union[str, Any]=0.1 , lowerCamelCase :Optional[int]="gelu" , lowerCamelCase :List[Any]=0.02 , lowerCamelCase :Optional[Any]=1e-5 , lowerCamelCase :str=0.0 , lowerCamelCase :int=None , lowerCamelCase :Optional[int]=None , **lowerCamelCase :Optional[int] , ) -> int:
super().__init__(**lowerCamelCase )
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = embed_dim
UpperCAmelCase__ = depths
UpperCAmelCase__ = len(lowerCamelCase )
UpperCAmelCase__ = num_heads
UpperCAmelCase__ = kernel_size
UpperCAmelCase__ = dilations
UpperCAmelCase__ = mlp_ratio
UpperCAmelCase__ = qkv_bias
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = drop_path_rate
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase__ = int(embed_dim * 2 ** (len(lowerCamelCase ) - 1) )
UpperCAmelCase__ = layer_scale_init_value
UpperCAmelCase__ = ["stem"] + [f'''stage{idx}''' for idx in range(1 , len(lowerCamelCase ) + 1 )]
UpperCAmelCase__ , UpperCAmelCase__ = get_aligned_output_features_output_indices(
out_features=lowerCamelCase , out_indices=lowerCamelCase , stage_names=self.stage_names )
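# A minimal usage sketch (assumes the class above is the obfuscated
# transformers.DinatConfig; requires a transformers version with Dinat):
if __name__ == "__main__":
    from transformers import DinatConfig

    config = DinatConfig(embed_dim=64 , depths=[3, 4, 6, 5] )
    # channels double at each stage, so hidden_size is 64 * 2 ** 3
    assert config.hidden_size == 512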
| 169 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
_lowerCAmelCase : List[str] = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class _UpperCamelCase ( unittest.TestCase ):
def UpperCAmelCase_ ( self :Dict , lowerCamelCase :str , lowerCamelCase :bool , lowerCamelCase :str = None , lowerCamelCase :list = None ) -> Tuple:
UpperCAmelCase__ = None
UpperCAmelCase__ = os.path.abspath(os.path.join("examples" , "by_feature" ) )
UpperCAmelCase__ = os.path.abspath("examples" )
for item in os.listdir(lowerCamelCase ):
if item not in EXCLUDE_EXAMPLES:
UpperCAmelCase__ = os.path.join(lowerCamelCase , lowerCamelCase )
if os.path.isfile(lowerCamelCase ) and ".py" in item_path:
with self.subTest(
tested_script=lowerCamelCase , feature_script=lowerCamelCase , tested_section="main()" if parser_only else "training_function()" , ):
UpperCAmelCase__ = compare_against_test(
os.path.join(lowerCamelCase , lowerCamelCase ) , lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = "\n".join(lowerCamelCase )
if special_strings is not None:
for string in special_strings:
UpperCAmelCase__ = diff.replace(lowerCamelCase , "" )
self.assertEqual(lowerCamelCase , "" )
def UpperCAmelCase_ ( self :List[str] ) -> Any:
self.one_complete_example("complete_nlp_example.py" , lowerCamelCase )
self.one_complete_example("complete_nlp_example.py" , lowerCamelCase )
def UpperCAmelCase_ ( self :str ) -> int:
UpperCAmelCase__ = os.path.abspath(os.path.join("examples" , "cv_example.py" ) )
UpperCAmelCase__ = [
" " * 16 + "{\n\n",
" " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
" " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
" " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
" " * 20 + "\"epoch\": epoch,\n\n",
" " * 16 + "},\n\n",
" " * 16 + "step=epoch,\n",
" " * 12,
" " * 8 + "for step, batch in enumerate(active_dataloader):\n",
]
self.one_complete_example("complete_cv_example.py" , lowerCamelCase , lowerCamelCase , lowerCamelCase )
self.one_complete_example("complete_cv_example.py" , lowerCamelCase , lowerCamelCase , lowerCamelCase )
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class _UpperCamelCase ( lowerCAmelCase ):
UpperCAmelCase_ = False
@classmethod
def UpperCAmelCase_ ( cls :List[Any] ) -> Any:
super().setUpClass()
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = os.path.join(cls._tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
UpperCAmelCase__ = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def UpperCAmelCase_ ( cls :Union[str, Any] ) -> Optional[int]:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def UpperCAmelCase_ ( self :Dict ) -> Dict:
UpperCAmelCase__ = f'''
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "epoch_0" ) ) )
def UpperCAmelCase_ ( self :Optional[int] ) -> Any:
UpperCAmelCase__ = f'''
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
'''.split()
UpperCAmelCase__ = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "step_2" ) ) )
def UpperCAmelCase_ ( self :Tuple ) -> Dict:
UpperCAmelCase__ = f'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
'''.split()
UpperCAmelCase__ = run_command(self._launch_args + testargs , return_stdout=lowerCamelCase )
self.assertNotIn("epoch 0:" , lowerCamelCase )
self.assertIn("epoch 1:" , lowerCamelCase )
def UpperCAmelCase_ ( self :Dict ) -> int:
UpperCAmelCase__ = f'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
'''.split()
UpperCAmelCase__ = run_command(self._launch_args + testargs , return_stdout=lowerCamelCase )
if torch.cuda.is_available():
UpperCAmelCase__ = torch.cuda.device_count()
else:
UpperCAmelCase__ = 1
if num_processes > 1:
self.assertNotIn("epoch 0:" , lowerCamelCase )
self.assertIn("epoch 1:" , lowerCamelCase )
else:
self.assertIn("epoch 0:" , lowerCamelCase )
self.assertIn("epoch 1:" , lowerCamelCase )
@slow
def UpperCAmelCase_ ( self :Dict ) -> Optional[int]:
UpperCAmelCase__ = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split()
with mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "0"} ):
UpperCAmelCase__ = run_command(self._launch_args + testargs , return_stdout=lowerCamelCase )
UpperCAmelCase__ = re.findall("({.+})" , lowerCamelCase )
UpperCAmelCase__ = [r for r in results if "accuracy" in r][-1]
UpperCAmelCase__ = ast.literal_eval(lowerCamelCase )
self.assertGreaterEqual(results["accuracy"] , 0.75 )
def UpperCAmelCase_ ( self :int ) -> Optional[int]:
UpperCAmelCase__ = ["examples/by_feature/multi_process_metrics.py"]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def UpperCAmelCase_ ( self :List[Any] ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
UpperCAmelCase__ = f'''
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase , "tracking" ) ) )
def UpperCAmelCase_ ( self :Any ) -> Dict:
UpperCAmelCase__ = ["examples/by_feature/gradient_accumulation.py"]
run_command(self._launch_args + testargs )
def UpperCAmelCase_ ( self :Any ) -> Optional[int]:
UpperCAmelCase__ = ["examples/by_feature/local_sgd.py"]
run_command(self._launch_args + testargs )
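# A minimal sketch of the config + launch pattern these tests drive
# (flag values are illustrative; requires accelerate):
if __name__ == "__main__":
    write_basic_config(save_location="default_config.yml")
    # then, from a shell:
    #   accelerate launch --config_file default_config.yml \
    #       examples/by_feature/checkpointing.py --checkpointing_steps epoch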
| 169 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( a , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ : str =KandinskyVaaControlnetPipeline
UpperCamelCase__ : Optional[Any] =["""image_embeds""", """negative_image_embeds""", """hint"""]
UpperCamelCase__ : List[Any] =["""image_embeds""", """negative_image_embeds""", """hint"""]
UpperCamelCase__ : List[Any] =[
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase__ : Union[str, Any] =False
@property
def __lowercase ( self ):
"""simple docstring"""
return 32
@property
def __lowercase ( self ):
"""simple docstring"""
return 32
@property
def __lowercase ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def __lowercase ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def __lowercase ( self ):
"""simple docstring"""
return 100
@property
def __lowercase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__UpperCamelCase : Tuple ={
'in_channels': 8,
            # out_channels is double in_channels because the model predicts both the mean and the variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__UpperCamelCase : Dict =UNetaDConditionModel(**lowerCamelCase__ )
return model
@property
def __lowercase ( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __lowercase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__UpperCamelCase : Optional[int] =VQModel(**self.dummy_movq_kwargs )
return model
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[int] =self.dummy_unet
__UpperCamelCase : Dict =self.dummy_movq
__UpperCamelCase : Any =DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.00_085 , beta_end=0.012 , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , steps_offset=1 , prediction_type='epsilon' , thresholding=lowerCamelCase__ , )
__UpperCamelCase : Optional[Any] ={
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
"""simple docstring"""
__UpperCamelCase : Tuple =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
__UpperCamelCase : List[str] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCamelCase__ )
# create hint
__UpperCamelCase : Any =floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
if str(lowerCamelCase__ ).startswith('mps' ):
__UpperCamelCase : Dict =torch.manual_seed(lowerCamelCase__ )
else:
__UpperCamelCase : Dict =torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
__UpperCamelCase : Tuple ={
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] ='cpu'
__UpperCamelCase : Dict =self.get_dummy_components()
__UpperCamelCase : Optional[Any] =self.pipeline_class(**lowerCamelCase__ )
__UpperCamelCase : str =pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
__UpperCamelCase : Dict =output.images
__UpperCamelCase : Optional[Any] =pipe(
**self.get_dummy_inputs(lowerCamelCase__ ) , return_dict=lowerCamelCase__ , )[0]
__UpperCamelCase : Tuple =image[0, -3:, -3:, -1]
__UpperCamelCase : Any =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase : int =np.array(
[0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : int =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy' )
__UpperCamelCase : Tuple =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
__UpperCamelCase : List[Any] =torch.from_numpy(np.array(lowerCamelCase__ ) ).float() / 255.0
__UpperCamelCase : Tuple =hint.permute(2 , 0 , 1 ).unsqueeze(0 )
__UpperCamelCase : Tuple =KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase__ )
__UpperCamelCase : Optional[int] =KandinskyVaaControlnetPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa )
__UpperCamelCase : Dict =pipeline.to(lowerCamelCase__ )
pipeline.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Tuple ='A robot, 4k photo'
__UpperCamelCase : str =torch.Generator(device='cuda' ).manual_seed(0 )
__UpperCamelCase : Tuple =pipe_prior(
lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
__UpperCamelCase : Union[str, Any] =torch.Generator(device='cuda' ).manual_seed(0 )
__UpperCamelCase : Dict =pipeline(
image_embeds=lowerCamelCase__ , negative_image_embeds=lowerCamelCase__ , hint=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=100 , output_type='np' , )
__UpperCamelCase : int =output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
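# A condensed usage sketch (assumes the upstream classes are diffusers'
# KandinskyV22PriorPipeline and KandinskyV22ControlnetPipeline; the hint
# below is a random stand-in for a real depth map, and a CUDA GPU plus
# model downloads are required):
if __name__ == "__main__":
    from diffusers import KandinskyV22ControlnetPipeline, KandinskyV22PriorPipeline

    prior = KandinskyV22PriorPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
    ).to("cuda")
    image_embeds, negative_image_embeds = prior("A robot, 4k photo").to_tuple()
    pipe = KandinskyV22ControlnetPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
    ).to("cuda")
    hint = torch.rand(1, 3, 512, 512, dtype=torch.float16, device="cuda")
    image = pipe(
        image_embeds=image_embeds,
        negative_image_embeds=negative_image_embeds,
        hint=hint,
        num_inference_steps=50,
        output_type="np",
    ).images[0]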
| 359 |
def A ( number ) -> bool:
if number < 0:
raise ValueError('number must not be negative' )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
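    # Illustrative check (a sketch): because 0 & -1 == 0, the bit trick above
    # also classifies 0 as a power of two; exclude 0 if strict powers are needed.
    print([n for n in range(17 ) if A(n )])  # [0, 1, 2, 4, 8, 16]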
| 245 | 0 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_a = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def _a ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int=None ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase: Tuple = XLNetConfig.from_json_file(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Union[str, Any] = finetuning_task.lower() if finetuning_task is not None else ''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
__lowerCAmelCase: Dict = finetuning_task
__lowerCAmelCase: int = GLUE_TASKS_NUM_LABELS[finetuning_task]
__lowerCAmelCase: Optional[int] = XLNetForSequenceClassification(SCREAMING_SNAKE_CASE )
elif "squad" in finetuning_task:
__lowerCAmelCase: Optional[Any] = finetuning_task
__lowerCAmelCase: Any = XLNetForQuestionAnswering(SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase: Union[str, Any] = XLNetLMHeadModel(SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model
__lowerCAmelCase: str = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print(f'''Save PyTorch model to {os.path.abspath(SCREAMING_SNAKE_CASE )}''' )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE )
print(f'''Save configuration file to {os.path.abspath(SCREAMING_SNAKE_CASE )}''' )
with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
_a = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
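# Example invocation (the script name and all paths are illustrative
# placeholders, not part of the original file):
#   python convert_xlnet_checkpoint.py \
#       --tf_checkpoint_path ./xlnet_cased/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_cased/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-converted \
#       --finetuning_task sst-2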
| 322 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( snake_case__ ):
_lowercase : int = ['image_processor', 'tokenizer']
_lowercase : Union[str, Any] = 'LayoutLMv3ImageProcessor'
_lowercase : List[str] = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self : Any , UpperCAmelCase : Dict=None , UpperCAmelCase : Tuple=None , **UpperCAmelCase : Optional[Any] ) -> str:
__lowerCAmelCase: str = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCAmelCase , )
__lowerCAmelCase: List[Any] = kwargs.pop('feature_extractor' )
__lowerCAmelCase: Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : Tuple , ) -> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
# first, apply the image processor
__lowerCAmelCase: str = self.image_processor(images=UpperCAmelCase , return_tensors=UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
__lowerCAmelCase: Tuple = [text] # add batch dimension (as the image processor always adds a batch dimension)
__lowerCAmelCase: List[str] = features['words']
__lowerCAmelCase: List[Any] = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
# add pixel values
__lowerCAmelCase: Tuple = features.pop('pixel_values' )
if return_overflowing_tokens is True:
__lowerCAmelCase: int = self.get_overflowing_images(UpperCAmelCase , encoded_inputs['overflow_to_sample_mapping'] )
__lowerCAmelCase: str = images
return encoded_inputs
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] ) -> List[str]:
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__lowerCAmelCase: str = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(UpperCAmelCase ) != len(UpperCAmelCase ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
F''' {len(UpperCAmelCase )} and {len(UpperCAmelCase )}''' )
return images_with_overflow
def UpperCAmelCase ( self : Optional[int] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Dict ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : Any , *UpperCAmelCase : Dict , **UpperCAmelCase : Any ) -> List[str]:
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCAmelCase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCAmelCase , )
return self.image_processor
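# A minimal usage sketch (assumes the class above is the obfuscated
# transformers.LayoutLMv3Processor; apply_ocr=False sidesteps the tesseract
# dependency by supplying words plus 0-1000 normalized boxes directly):
if __name__ == "__main__":
    from PIL import Image
    from transformers import LayoutLMv3Processor

    processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
    image = Image.new("RGB", (224, 224), color="white")  # stand-in document image
    words = ["hello", "world"]
    boxes = [[10, 10, 110, 40], [120, 10, 220, 40]]
    encoding = processor(image, words, boxes=boxes, return_tensors="pt")
    print(sorted(encoding.keys()))  # ['attention_mask', 'bbox', 'input_ids', 'pixel_values']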
| 322 | 1 |
import math
import unittest
def a__ ( number ):
    assert isinstance(number, int ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
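# Alias for the unit tests below (the function above is the obfuscated
# is_prime; exposing the conventional name keeps the suite runnable):
is_prime = a__

# Quick illustration of the 6k +/- 1 check:
assert [n for n in range(30 ) if is_prime(n )] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]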
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(1_1 ) )
self.assertTrue(is_prime(1_3 ) )
self.assertTrue(is_prime(1_7 ) )
self.assertTrue(is_prime(1_9 ) )
self.assertTrue(is_prime(2_3 ) )
self.assertTrue(is_prime(2_9 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        with self.assertRaises(AssertionError ):
is_prime(-1_9 )
self.assertFalse(
is_prime(0 ) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
self.assertFalse(
is_prime(1 ) , 'One only has 1 positive factor, primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 366 |
def a__ ( A__ ):
SCREAMING_SNAKE_CASE_ : int = []
SCREAMING_SNAKE_CASE_ : int = []
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'^': 3,
'*': 2,
'/': 2,
'%': 2,
'+': 1,
'-': 1,
} # Priority of each operator
SCREAMING_SNAKE_CASE_ : Tuple = len(A__ ) if (len(A__ ) > 7) else 7
# Print table header for output
print(
'Symbol'.center(8 ), 'Stack'.center(A__ ), 'Postfix'.center(A__ ), sep=' | ', )
print('-' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(A__ ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(A__ ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(A__ ) == 0:
stack.append(A__ ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(A__ ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(A__ ) # push x to stack
print(
x.center(8 ), (''.join(A__ )).ljust(A__ ), (''.join(A__ )).ljust(A__ ), sep=' | ', ) # Output in tabular format
while len(A__ ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
' '.center(8 ), (''.join(A__ )).ljust(A__ ), (''.join(A__ )).ljust(A__ ), sep=' | ', ) # Output in tabular format
return "".join(A__ ) # return Postfix as str
def a__ ( A__ ):
SCREAMING_SNAKE_CASE_ : Tuple = list(infix[::-1] ) # reverse the infix equation
for i in range(len(A__ ) ):
if infix[i] == "(":
SCREAMING_SNAKE_CASE_ : Optional[int] = ')' # change "(" to ")"
elif infix[i] == ")":
SCREAMING_SNAKE_CASE_ : Optional[int] = '(' # change ")" to "("
return (infix_2_postfix(''.join(A__ ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
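# A compact, self-contained shunting-yard sketch of the same conversion
# (illustrative; the two obfuscated defs above shadow each other, so this
# version is standalone and mirrors infix_2_postfix minus the table output,
# including treating ^ as left-associative):
def _to_postfix(infix: str) -> str:
    prec = {"^": 3, "*": 2, "/": 2, "%": 2, "+": 1, "-": 1}
    out, stack = [], []
    for ch in infix:
        if ch.isalnum():
            out.append(ch)
        elif ch == "(":
            stack.append(ch)
        elif ch == ")":
            while stack[-1] != "(":
                out.append(stack.pop())
            stack.pop()
        else:
            while stack and stack[-1] != "(" and prec[ch] <= prec[stack[-1]]:
                out.append(stack.pop())
            stack.append(ch)
    while stack:
        out.append(stack.pop())
    return "".join(out)


# _to_postfix("a+b*c") -> "abc*+"; reversing the input (with parentheses
# swapped) and reversing the result yields the prefix form "+a*bc".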
if __name__ == "__main__":
lowerCAmelCase__ : str =input('\nEnter an Infix Equation = ') # Input an Infix equation
lowerCAmelCase__ : Optional[Any] =''.join(Infix.split()) # Remove spaces from the input
print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
| 162 | 0 |
from random import randint, random
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = 5 , ):
'''simple docstring'''
__UpperCamelCase :Optional[int] = [[-1] * number_of_cells] # Create a highway without any car
__UpperCamelCase :Optional[int] = 0
__UpperCamelCase :Any = max(SCREAMING_SNAKE_CASE , 0 )
while i < number_of_cells:
__UpperCamelCase :Union[str, Any] = (
randint(0 , SCREAMING_SNAKE_CASE ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Any = 0
__UpperCamelCase :Optional[int] = highway_now[car_index + 1 :]
for cell in range(len(SCREAMING_SNAKE_CASE ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(SCREAMING_SNAKE_CASE , -1 )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Optional[int] = len(SCREAMING_SNAKE_CASE )
    # Before the update, the next state of the highway is empty
__UpperCamelCase :List[str] = [-1] * number_of_cells
for car_index in range(SCREAMING_SNAKE_CASE ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
__UpperCamelCase :Optional[int] = min(highway_now[car_index] + 1 , SCREAMING_SNAKE_CASE )
# Number of empty cell before the next car
__UpperCamelCase :Dict = get_distance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) - 1
# We can't have the car causing an accident
__UpperCamelCase :Tuple = min(next_highway[car_index] , SCREAMING_SNAKE_CASE )
if random() < probability:
# Randomly, a driver will slow down
__UpperCamelCase :Optional[Any] = max(next_highway[car_index] - 1 , 0 )
return next_highway
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Union[str, Any] = len(highway[0] )
for i in range(SCREAMING_SNAKE_CASE ):
__UpperCamelCase :int = update(highway[i] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCamelCase :Dict = [-1] * number_of_cells
for car_index in range(SCREAMING_SNAKE_CASE ):
__UpperCamelCase :Dict = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
__UpperCamelCase :Dict = (car_index + speed) % number_of_cells
# Commit the change of position
__UpperCamelCase :Union[str, Any] = speed
highway.append(SCREAMING_SNAKE_CASE )
return highway
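# A tiny self-contained sketch of one Nagel-Schreckenberg update on a ring
# road (illustrative; it condenses the construct/update/simulate helpers
# above into a single step):
def _nasch_step(road, max_speed=5, probability=0.3):
    from random import random

    n = len(road)
    nxt = [-1] * n
    for i, v in enumerate(road):
        if v == -1:
            continue  # empty cell
        v = min(v + 1, max_speed)  # 1) accelerate
        gap = next(d for d in range(1, n + 1) if road[(i + d) % n] != -1) - 1
        v = min(v, gap)  # 2) brake to avoid a collision
        if random() < probability:
            v = max(v - 1, 0)  # 3) random slowdown
        nxt[(i + v) % n] = v  # 4) move the car
    return nxt


# Example: _nasch_step([0, -1, -1, 2, -1, -1, -1, -1, -1, -1])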
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 |
"""simple docstring"""
def __a ( __lowerCamelCase ):
    n_element = int(__lowerCamelCase )
    if n_element < 1:
        my_error = ValueError("n_element should be a positive number" )
        raise my_error
    hamming_list = [1]
    i , j , k = (0, 0, 0)
    index = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5 ) )
index += 1
return hamming_list
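# Alias for the driver below (the definition above is the obfuscated
# hamming; exposing the conventional name keeps the script runnable):
hamming = __a

# Quick check of the first ten Hamming numbers (a sketch):
assert hamming(10 ) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]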
if __name__ == "__main__":
_a = input('Enter the last number (nth term) of the Hamming Number Series: ')
print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
_a = hamming(int(n))
print('-----------------------------------------------------')
print(f"""The list with nth numbers is: {hamming_numbers}""")
print('-----------------------------------------------------')
| 61 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a =logging.get_logger(__name__)
a ={"""vocab_file""": """sentencepiece.bpe.model"""}
a ={
"""vocab_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
}
}
a ={
"""camembert-base""": 512,
}
a ="""▁"""
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
_UpperCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : int = ['''input_ids''', '''attention_mask''']
def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : List[str]="<s>" ,SCREAMING_SNAKE_CASE__ : List[str]="</s>" ,SCREAMING_SNAKE_CASE__ : List[str]="</s>" ,SCREAMING_SNAKE_CASE__ : int="<s>" ,SCREAMING_SNAKE_CASE__ : List[Any]="<unk>" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="<pad>" ,SCREAMING_SNAKE_CASE__ : Optional[int]="<mask>" ,SCREAMING_SNAKE_CASE__ : Dict=["<s>NOTUSED", "</s>NOTUSED"] ,SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None ,**SCREAMING_SNAKE_CASE__ : Optional[Any] ,):
# Mask token behave like a normal word, i.e. include the space before it
__lowerCamelCase : Tuple = AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__) if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) else mask_token
__lowerCamelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,sep_token=SCREAMING_SNAKE_CASE__ ,cls_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,mask_token=SCREAMING_SNAKE_CASE__ ,additional_special_tokens=SCREAMING_SNAKE_CASE__ ,sp_model_kwargs=self.sp_model_kwargs ,**SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(SCREAMING_SNAKE_CASE__))
__lowerCamelCase : Dict = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
__lowerCamelCase : Dict = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
__lowerCamelCase : Optional[int] = len(self.fairseq_tokens_to_ids)
__lowerCamelCase : List[Any] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
__lowerCamelCase : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCamelCase : List[Any] = [self.cls_token_id]
__lowerCamelCase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__)
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1]
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
__lowerCamelCase : Tuple = [self.sep_token_id]
__lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def lowerCAmelCase ( self : Tuple):
return len(self.fairseq_tokens_to_ids) + len(self.sp_model)
def lowerCAmelCase ( self : Any):
__lowerCamelCase : Union[str, Any] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str):
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ ,out_type=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : str):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : Union[str, Any]):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[int]):
__lowerCamelCase : Optional[Any] = []
__lowerCamelCase : Optional[Any] = ''
__lowerCamelCase : List[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__) + token
__lowerCamelCase : int = True
__lowerCamelCase : Dict = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__)
return out_string.strip()
def __getstate__( self : int):
__lowerCamelCase : Optional[Any] = self.__dict__.copy()
__lowerCamelCase : Any = None
return state
def __setstate__( self : int ,SCREAMING_SNAKE_CASE__ : int):
__lowerCamelCase : Any = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs'):
__lowerCamelCase : int = {}
__lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None):
if not os.path.isdir(SCREAMING_SNAKE_CASE__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
__lowerCamelCase : Optional[int] = os.path.join(
SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file ,SCREAMING_SNAKE_CASE__)
elif not os.path.isfile(self.vocab_file):
with open(SCREAMING_SNAKE_CASE__ ,'wb') as fi:
__lowerCamelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__)
return (out_vocab_file,)
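# A minimal usage sketch (assumes the class above is the obfuscated
# transformers.CamembertTokenizer; needs sentencepiece and a model download):
if __name__ == "__main__":
    from transformers import CamembertTokenizer

    tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
    ids = tokenizer("J'aime le camembert !").input_ids
    # the <s> ... </s> framing comes from build_inputs_with_special_tokens above
    print(tokenizer.decode(ids))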
| 113 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
a =["""gpt2"""]
a ="""gpt2"""
if is_tf_available():
class A_ ( tf.Module ):
def __init__( self : Tuple ,SCREAMING_SNAKE_CASE__ : Tuple):
super().__init__()
__lowerCamelCase : List[Any] = tokenizer
__lowerCamelCase : str = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = TFGPTaLMHeadModel.from_config(SCREAMING_SNAKE_CASE__)
@tf.function(input_signature=(tf.TensorSpec((None,) ,tf.string ,name='text'),))
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Optional[int]):
__lowerCamelCase : str = self.tokenizer(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = tokenized['input_ids'].to_tensor()
__lowerCamelCase : List[Any] = tf.cast(input_ids_dense > 0 ,tf.intaa)
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
__lowerCamelCase : Union[str, Any] = self.model(input_ids=SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__)['logits']
return outputs
@require_tf
@require_keras_nlp
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : List[str]):
super().setUp()
__lowerCamelCase : str = [GPTaTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__) for checkpoint in (TOKENIZER_CHECKPOINTS)]
__lowerCamelCase : List[Any] = [TFGPTaTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers) == len(self.tf_tokenizers)
__lowerCamelCase : Optional[int] = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
__lowerCamelCase : List[str] = list(zip(self.test_sentences ,self.test_sentences[::-1]))
def lowerCAmelCase ( self : Optional[int]):
for tokenizer, tf_tokenizer in zip(self.tokenizers ,self.tf_tokenizers):
for test_inputs in self.test_sentences:
__lowerCamelCase : Union[str, Any] = tokenizer([test_inputs] ,return_tensors='tf')
__lowerCamelCase : Dict = tf_tokenizer([test_inputs])
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
__lowerCamelCase : List[str] = python_outputs[key].numpy()
__lowerCamelCase : Optional[Any] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
self.assertTrue(tf.reduce_all(tf.cast(SCREAMING_SNAKE_CASE__ ,tf.intaa) == tf_outputs_values))
@slow
def lowerCAmelCase ( self : Optional[Any]):
for tf_tokenizer in self.tf_tokenizers:
__lowerCamelCase : Dict = tf.function(SCREAMING_SNAKE_CASE__)
for test_inputs in self.test_sentences:
__lowerCamelCase : Any = tf.constant(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = compiled_tokenizer(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = tf_tokenizer(SCREAMING_SNAKE_CASE__)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
def lowerCAmelCase ( self : str):
for tf_tokenizer in self.tf_tokenizers:
__lowerCamelCase : Any = ModelToSave(tokenizer=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = tf.convert_to_tensor([self.test_sentences[0]])
__lowerCamelCase : str = model.serving(SCREAMING_SNAKE_CASE__) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__lowerCamelCase : Optional[Any] = Path(SCREAMING_SNAKE_CASE__) / 'saved.model'
tf.saved_model.save(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,signatures={'serving_default': model.serving})
__lowerCamelCase : Tuple = tf.saved_model.load(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = loaded_model.signatures['serving_default'](SCREAMING_SNAKE_CASE__)['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output))
@slow
def lowerCAmelCase ( self : int):
for tf_tokenizer in self.tf_tokenizers:
__lowerCamelCase : Optional[int] = tf.convert_to_tensor([self.test_sentences[0]])
__lowerCamelCase : Optional[Any] = tf_tokenizer(SCREAMING_SNAKE_CASE__) # Build model with some sample inputs
__lowerCamelCase : str = tf_tokenizer.get_config()
__lowerCamelCase : List[str] = TFGPTaTokenizer.from_config(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = model_from_config(SCREAMING_SNAKE_CASE__)
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
@slow
def lowerCAmelCase ( self : Dict):
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
__lowerCamelCase : List[Any] = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
__lowerCamelCase : Dict = tf.convert_to_tensor([self.test_sentences[0]])
__lowerCamelCase : Union[str, Any] = tf_tokenizer(SCREAMING_SNAKE_CASE__ ,max_length=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = out['input_ids'].numpy().shape[1]
assert out_length == max_length
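# A minimal usage sketch (assumes the class above wraps the obfuscated
# transformers.models.gpt2.TFGPT2Tokenizer; requires tensorflow and keras-nlp):
if __name__ == "__main__":
    import tensorflow as tf
    from transformers.models.gpt2 import TFGPT2Tokenizer

    tf_tok = TFGPT2Tokenizer.from_pretrained("gpt2")
    out = tf_tok(tf.constant(["This is a straightforward English test sentence."]))
    print(out["input_ids"])  # a ragged tensor of token ids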
| 113 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _lowerCamelCase ( a_ ):
@staticmethod
@abstractmethod
def _lowerCAmelCase ( UpperCamelCase : ArgumentParser ) -> List[str]:
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def _lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
raise NotImplementedError()
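# A self-contained sketch of the intended pattern (the ABC above is the
# obfuscated BaseTransformersCLICommand; every name below is illustrative):
class _EchoCommand:
    @staticmethod
    def register_subcommand(parser):
        # `parser` is the handle returned by ArgumentParser.add_subparsers()
        sub = parser.add_parser("echo")
        sub.add_argument("text")
        sub.set_defaults(func=lambda args: _EchoCommand(args.text))

    def __init__(self, text):
        self.text = text

    def run(self):
        print(self.text)


# Usage: root = ArgumentParser(); _EchoCommand.register_subcommand(root.add_subparsers())
# args = root.parse_args(["echo", "hi"]); args.func(args).run()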
| 242 |
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : List[str] = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Any:
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
lowerCAmelCase__ : List[str] = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
lowerCAmelCase__ : Any = in_proj_weight[
: encoder_config.hidden_size, :
]
lowerCAmelCase__ : int = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
lowerCAmelCase__ : int = in_proj_weight[
-encoder_config.hidden_size :, :
]
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : Any = dct.pop(__UpperCAmelCase )
lowerCAmelCase__ : Any = val
def lowercase_ ( __UpperCAmelCase ) -> int:
if "handwritten" in checkpoint_url:
lowerCAmelCase__ : Tuple = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
lowerCAmelCase__ : Optional[int] = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
lowerCAmelCase__ : int = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
lowerCAmelCase__ : Any = ViTConfig(image_size=384 , qkv_bias=__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
lowerCAmelCase__ : List[str] = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
lowerCAmelCase__ : Dict = 1024
lowerCAmelCase__ : Tuple = 4096
lowerCAmelCase__ : Optional[Any] = 24
lowerCAmelCase__ : Tuple = 16
lowerCAmelCase__ : List[str] = 1024
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
lowerCAmelCase__ : Optional[int] = False
lowerCAmelCase__ : List[str] = """relu"""
lowerCAmelCase__ : Dict = 1024
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : int = False
lowerCAmelCase__ : List[Any] = False
# load HuggingFace model
lowerCAmelCase__ : Tuple = ViTModel(__UpperCAmelCase , add_pooling_layer=__UpperCAmelCase )
lowerCAmelCase__ : int = TrOCRForCausalLM(__UpperCAmelCase )
lowerCAmelCase__ : Any = VisionEncoderDecoderModel(encoder=__UpperCAmelCase , decoder=__UpperCAmelCase )
model.eval()
# load state_dict of original model, rename some keys
lowerCAmelCase__ : Union[str, Any] = torch.hub.load_state_dict_from_url(__UpperCAmelCase , map_location="""cpu""" , check_hash=__UpperCAmelCase )["""model"""]
lowerCAmelCase__ : List[Any] = create_rename_keys(__UpperCAmelCase , __UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
read_in_q_k_v(__UpperCAmelCase , __UpperCAmelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
lowerCAmelCase__ : int = state_dict.pop(__UpperCAmelCase )
if key.startswith("""decoder""" ) and "output_projection" not in key:
lowerCAmelCase__ : Optional[Any] = val
else:
lowerCAmelCase__ : int = val
# load state dict
model.load_state_dict(__UpperCAmelCase )
# Check outputs on an image
lowerCAmelCase__ : Any = ViTImageProcessor(size=encoder_config.image_size )
lowerCAmelCase__ : Optional[Any] = RobertaTokenizer.from_pretrained("""roberta-large""" )
lowerCAmelCase__ : List[str] = TrOCRProcessor(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Dict = processor(images=prepare_img(__UpperCAmelCase ) , return_tensors="""pt""" ).pixel_values
# verify logits
lowerCAmelCase__ : str = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
lowerCAmelCase__ : List[str] = model(pixel_values=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase )
lowerCAmelCase__ : str = outputs.logits
lowerCAmelCase__ : Union[str, Any] = torch.Size([1, 1, 5_0265] )
if "trocr-base-handwritten" in checkpoint_url:
lowerCAmelCase__ : Optional[int] = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
lowerCAmelCase__ : int = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
lowerCAmelCase__ : Optional[Any] = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
lowerCAmelCase__ : int = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , __UpperCAmelCase , atol=1E-3 ), "First elements of logits not as expected"
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCAmelCase )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
_A = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
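# Example invocation (the script name and output path are illustrative
# placeholders; the URL is the default declared above):
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten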
| 242 | 1 |
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def __snake_case ( _lowerCAmelCase : Tuple ) -> Tuple:
A_ : Dict = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def __snake_case ( _lowerCAmelCase : str ) -> int:
A_ : Optional[Any] = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
A_ : Tuple = s_dict.pop(_lowerCAmelCase )
elif "subsample" in key:
A_ : Union[str, Any] = s_dict.pop(_lowerCAmelCase )
def __snake_case ( _lowerCAmelCase : List[str] ) -> Optional[int]:
A_ : Dict = emb.weight.shape
A_ : Tuple = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase )
A_ : Tuple = emb.weight.data
return lin_layer
def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"]
    state_dict = mam_aaa["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = SpeechaTextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )
    model = SpeechaTextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--fairseq_path''', type=str, help='''Path to the fairseq model (.pt) file.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
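# Example invocation (illustrative; the script filename and paths are assumptions):
#   python convert_s2t_fairseq_to_tfms.py \
#       --fairseq_path /path/to/s2t_checkpoint.pt \
#       --pytorch_dump_folder_path ./s2t-hf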
| 354 |
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum sum over any contiguous subsequence (Kadane's algorithm).

    >>> max_subsequence_sum([1, 2, 3, 4, -2])
    10
    >>> max_subsequence_sum([-2, -3, -1, -4, -6])
    -1
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    curr = ans = nums[0]
    for num in nums[1:]:
        curr = max(num, curr + num)  # best sum of a subsequence ending at this element
        ans = max(ans, curr)  # best sum seen anywhere so far
    return ans
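# Illustrative trace of the loop above on [1, -2, 3] (example input, not from the
# original file):
#   start:   curr = ans = 1
#   num=-2:  curr = max(-2, 1 + -2) = -1,  ans = max(1, -1) = 1
#   num= 3:  curr = max(3, -1 + 3) = 3,    ans = max(1, 3) = 3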
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
| 70 | 0 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """Run the `accelerate` pre-forward hook (if one is attached) before `method`."""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
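# Illustrative usage (hypothetical class, not part of this module):
#
#   class MyAutoencoder(torch.nn.Module):
#       @apply_forward_hook
#       def encode(self, x):
#           return self.encoder(x)
#
# When the module has been dispatched with `accelerate`, `_hf_hook.pre_forward`
# moves weights and inputs onto the right device before `encode` runs.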
| 58 |
import re
from filelock import FileLock
try:
import nltk
UpperCAmelCase__ : Tuple = True
except (ImportError, ModuleNotFoundError):
UpperCAmelCase__ : Optional[Any] = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 245 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __magic_name__ :
def __init__( self : Any ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Optional[int]=2 ,_UpperCAmelCase : Tuple=32 ,_UpperCAmelCase : Dict=16 ,_UpperCAmelCase : Optional[Any]=3 ,_UpperCAmelCase : Tuple=True ,_UpperCAmelCase : Dict=True ,_UpperCAmelCase : str=32 ,_UpperCAmelCase : Tuple=4 ,_UpperCAmelCase : List[Any]=[0, 1, 2, 3] ,_UpperCAmelCase : List[str]=4 ,_UpperCAmelCase : Any=37 ,_UpperCAmelCase : List[Any]="gelu" ,_UpperCAmelCase : Optional[int]=0.1 ,_UpperCAmelCase : Tuple=0.1 ,_UpperCAmelCase : Optional[Any]=0.02 ,_UpperCAmelCase : Optional[Any]=3 ,_UpperCAmelCase : List[str]=[1, 384, 24, 24] ,_UpperCAmelCase : Union[str, Any]=True ,_UpperCAmelCase : List[str]=None ,):
_a : int = parent
_a : Optional[Any] = batch_size
_a : str = image_size
_a : str = patch_size
_a : Any = num_channels
_a : Optional[Any] = is_training
_a : str = use_labels
_a : Union[str, Any] = hidden_size
_a : int = num_hidden_layers
_a : str = backbone_out_indices
_a : List[Any] = num_attention_heads
_a : Any = intermediate_size
_a : List[Any] = hidden_act
_a : int = hidden_dropout_prob
_a : Optional[int] = attention_probs_dropout_prob
_a : Optional[int] = initializer_range
_a : Tuple = num_labels
_a : Tuple = backbone_featmap_shape
_a : Optional[int] = scope
_a : Dict = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_a : str = (image_size // patch_size) ** 2
_a : Optional[int] = num_patches + 1
def __lowercase ( self : Dict ):
_a : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Any = None
if self.use_labels:
_a : Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
_a : int = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : int ):
_a : Dict = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [96, 192, 384, 768],
'num_groups': 2,
}
return DPTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,backbone_out_indices=self.backbone_out_indices ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_UpperCAmelCase ,initializer_range=self.initializer_range ,is_hybrid=self.is_hybrid ,backbone_config=_UpperCAmelCase ,backbone_featmap_shape=self.backbone_featmap_shape ,)
def __lowercase ( self : Any ,_UpperCAmelCase : str ,_UpperCAmelCase : int ,_UpperCAmelCase : Optional[Any] ):
_a : Optional[Any] = DPTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : List[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : List[Any] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ):
_a : List[Any] = self.num_labels
_a : Dict = DPTForDepthEstimation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : str = model(_UpperCAmelCase )
self.parent.assertEqual(result.predicted_depth.shape ,(self.batch_size, self.image_size, self.image_size) )
def __lowercase ( self : str ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : int ,_UpperCAmelCase : List[Any] ):
_a : Optional[int] = self.num_labels
_a : Optional[int] = DPTForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Optional[int] = model(_UpperCAmelCase ,labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __lowercase ( self : List[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : Optional[int] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowerCAmelCase : int = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : Dict = False
lowerCAmelCase : List[str] = False
def __lowercase ( self : Union[str, Any] ):
_a : Union[str, Any] = DPTModelTester(self )
_a : int = ConfigTester(self ,config_class=_UpperCAmelCase ,has_text_modality=_UpperCAmelCase ,hidden_size=37 )
def __lowercase ( self : Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason='DPT does not use inputs_embeds' )
def __lowercase ( self : Tuple ):
pass
def __lowercase ( self : List[str] ):
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Union[str, Any] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_a : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase ,nn.Linear ) )
def __lowercase ( self : Any ):
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Optional[int] = model_class(_UpperCAmelCase )
_a : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Union[str, Any] = [*signature.parameters.keys()]
_a : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_UpperCAmelCase )
def __lowercase ( self : str ):
_a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def __lowercase ( self : int ):
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_UpperCAmelCase )
def __lowercase ( self : str ):
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
def __lowercase ( self : Union[str, Any] ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_a : Tuple = True
if model_class in get_values(_UpperCAmelCase ):
continue
_a : str = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
_a : Optional[int] = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase )
_a : Optional[Any] = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowercase ( self : Union[str, Any] ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_a : Optional[Any] = False
_a : List[Any] = True
if model_class in get_values(_UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
_a : str = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.train()
_a : int = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase )
_a : Optional[Any] = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowercase ( self : str ):
_a , _a : str = self.model_tester.prepare_config_and_inputs_for_common()
_a : List[str] = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
_a : Tuple = model_class(config=_UpperCAmelCase )
# Skip the check for the backbone
_a : Any = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_a : Union[str, Any] = [F"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowercase ( self : Optional[Any] ):
pass
@slow
def __lowercase ( self : Union[str, Any] ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_a : List[Any] = DPTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __lowercase ( self : Any ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_a : Optional[Any] = 'add'
with self.assertRaises(_UpperCAmelCase ):
_a : List[str] = DPTForDepthEstimation(_UpperCAmelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : Any ):
_a : Optional[Any] = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas' )
_a : Union[str, Any] = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas' ).to(_UpperCAmelCase )
_a : Optional[int] = prepare_img()
_a : Optional[Any] = image_processor(images=_UpperCAmelCase ,return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_a : Tuple = model(**_UpperCAmelCase )
_a : Union[str, Any] = outputs.predicted_depth
# verify the predicted depth
_a : str = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape ,_UpperCAmelCase )
_a : int = torch.tensor(
[[[5.64_37, 5.61_46, 5.65_11], [5.43_71, 5.56_49, 5.59_58], [5.52_15, 5.51_84, 5.52_93]]] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 ,_UpperCAmelCase ,atol=1E-4 ) )
| 107 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization."
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization."
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )
def main() -> None:
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicitly specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicitly specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
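# Example invocation (illustrative; the script filename and model choices are
# assumptions -- any compatible vision encoder and causal-LM decoder work):
#   python create_model_from_encoder_decoder_models.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2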
| 107 | 1 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class lowercase :
pass
| 101 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class A__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=None , ) -> Union[str, Any]:
'''simple docstring'''
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_token_type_ids
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = type_sequence_label_size
A_ = initializer_range
A_ = num_labels
A_ = num_choices
A_ = scope
A_ = self.vocab_size - 1
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
A_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ = OpenAIGPTModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ , head_mask=UpperCamelCase__ )
A_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
A_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
A_ = OpenAIGPTLMHeadModel(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = OpenAIGPTDoubleHeadsModel(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.num_labels
A_ = OpenAIGPTForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class A__ ( _snake_case , _snake_case , _snake_case , unittest.TestCase ):
lowercase = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowercase = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowercase = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ) -> Union[str, Any]:
'''simple docstring'''
A_ = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
A_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase__ , )
A_ = inputs_dict["""labels"""]
A_ = inputs_dict["""labels"""]
A_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=UpperCamelCase__ , )
A_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
return inputs_dict
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = OpenAIGPTModelTester(self )
A_ = ConfigTester(self , config_class=UpperCamelCase__ , n_embd=37 )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*UpperCamelCase__ )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCamelCase__ )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*UpperCamelCase__ )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*UpperCamelCase__ )
@slow
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = OpenAIGPTModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(UpperCamelCase__ )
A_ = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=UpperCamelCase__ ) # the president is
A_ = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
A_ = model.generate(UpperCamelCase__ , do_sample=UpperCamelCase__ )
self.assertListEqual(output_ids[0].tolist() , UpperCamelCase__ )
| 162 | 0 |
"""simple docstring"""
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
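# Illustrative usage:
#   length_conversion(4, "meter", "kilometer")  -> 0.004           (exponent 0 - 3 = -3)
#   length_conversion(3, "gigametre", "meter")  -> 3000000000.0    (exponent 9 - 0 = 9)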
| 341 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__A = logging.get_logger(__name__)
__A = {'vocab_file': 'spiece.model'}
__A = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
__A = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
__A = 0
__A = 1
__A = 2
__A = 3
__A = 4
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = '''left'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
_lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
_lowerCAmelCase =3
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =remove_space
_lowerCAmelCase =keep_accents
_lowerCAmelCase =vocab_file
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
@property
def _lowerCAmelCase ( self ) -> str:
return len(self.sp_model )
def _lowerCAmelCase ( self ) -> int:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[int]:
_lowerCAmelCase =self.__dict__.copy()
_lowerCAmelCase =None
return state
def __setstate__( self , __UpperCAmelCase ) -> Tuple:
_lowerCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase ={}
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
if self.remove_space:
_lowerCAmelCase =""" """.join(inputs.strip().split() )
else:
_lowerCAmelCase =inputs
_lowerCAmelCase =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_lowerCAmelCase =unicodedata.normalize("""NFKD""" , __UpperCAmelCase )
_lowerCAmelCase ="""""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
_lowerCAmelCase =outputs.lower()
return outputs
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
_lowerCAmelCase =self.preprocess_text(__UpperCAmelCase )
_lowerCAmelCase =self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
_lowerCAmelCase =[]
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_lowerCAmelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase =cur_pieces[1:]
else:
_lowerCAmelCase =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]:
return self.sp_model.PieceToId(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return self.sp_model.IdToPiece(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str:
_lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> str:
_lowerCAmelCase =kwargs.pop("""use_source_tokenizer""" , __UpperCAmelCase )
_lowerCAmelCase =self.convert_ids_to_tokens(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
_lowerCAmelCase =[]
sub_texts.append(__UpperCAmelCase )
else:
current_sub_text.append(__UpperCAmelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase ="""""".join(__UpperCAmelCase )
_lowerCAmelCase =(
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase =self.clean_up_tokenization(__UpperCAmelCase )
return clean_text
else:
return text
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1]
return ([0] * len(__UpperCAmelCase )) + [1, 1]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
_lowerCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
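# Sketch of the sequence formats the methods above produce (XLNet appends its
# special tokens at the end):
#   single sequence:  X <sep> <cls>           token_type_ids: 0 ... 0 2
#   sequence pair:    A <sep> B <sep> <cls>   token_type_ids: 0 ... 0 1 ... 1 2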
| 341 | 1 |
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    s = str(n)
    return s == s[::-1]


def solution(limit: int = 1_000_000) -> int:
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
    print(solution(int(input().strip())))
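# Illustrative check with a small limit (the limit is an assumption):
#   solution(10) -> 25, since 1, 3, 5, 7 and 9 read the same forwards and
#   backwards in base 10 and in base 2 (1, 11, 101, 111, 1001).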
| 113 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> List[str]:
SCREAMING_SNAKE_CASE = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
SCREAMING_SNAKE_CASE = 1_28
elif "12-12" in model_name:
SCREAMING_SNAKE_CASE = 12
SCREAMING_SNAKE_CASE = 12
elif "14-14" in model_name:
SCREAMING_SNAKE_CASE = 14
SCREAMING_SNAKE_CASE = 14
elif "16-16" in model_name:
SCREAMING_SNAKE_CASE = 16
SCREAMING_SNAKE_CASE = 16
else:
raise ValueError('Model not supported' )
SCREAMING_SNAKE_CASE = 'huggingface/label-files'
if "speech-commands" in model_name:
SCREAMING_SNAKE_CASE = 35
SCREAMING_SNAKE_CASE = 'speech-commands-v2-id2label.json'
else:
SCREAMING_SNAKE_CASE = 5_27
SCREAMING_SNAKE_CASE = 'audioset-id2label.json'
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
return config
def lowercase (SCREAMING_SNAKE_CASE_ : str ) -> List[Any]:
if "module.v" in name:
SCREAMING_SNAKE_CASE = name.replace('module.v' , 'audio_spectrogram_transformer' )
if "cls_token" in name:
SCREAMING_SNAKE_CASE = name.replace('cls_token' , 'embeddings.cls_token' )
if "dist_token" in name:
SCREAMING_SNAKE_CASE = name.replace('dist_token' , 'embeddings.distillation_token' )
if "pos_embed" in name:
SCREAMING_SNAKE_CASE = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
# transformer blocks
if "blocks" in name:
SCREAMING_SNAKE_CASE = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
SCREAMING_SNAKE_CASE = name.replace('attn' , 'attention.self' )
if "norm1" in name:
SCREAMING_SNAKE_CASE = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
SCREAMING_SNAKE_CASE = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE = name.replace('mlp.fc2' , 'output.dense' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
SCREAMING_SNAKE_CASE = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' )
# classifier head
if "module.mlp_head.0" in name:
SCREAMING_SNAKE_CASE = name.replace('module.mlp_head.0' , 'classifier.layernorm' )
if "module.mlp_head.1" in name:
SCREAMING_SNAKE_CASE = name.replace('module.mlp_head.1' , 'classifier.dense' )
return name
def lowercase (SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict ) -> Dict:
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "qkv" in key:
SCREAMING_SNAKE_CASE = key.split('.' )
SCREAMING_SNAKE_CASE = int(key_split[3] )
SCREAMING_SNAKE_CASE = config.hidden_size
if "weight" in key:
SCREAMING_SNAKE_CASE = val[:dim, :]
SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE = val[:dim]
SCREAMING_SNAKE_CASE = val[dim : dim * 2]
SCREAMING_SNAKE_CASE = val[-dim:]
else:
SCREAMING_SNAKE_CASE = val
return orig_state_dict
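# Note on the qkv split above (added for clarity): timm-style checkpoints store
# query/key/value as a single fused "qkv" matrix of shape (3 * hidden, hidden).
# With an assumed hidden size of 768, the slices map as:
#   val[:768] -> query,  val[768:1536] -> key,  val[-768:] -> value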
def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> List[Any]:
SCREAMING_SNAKE_CASE = [
'module.v.head.weight',
'module.v.head.bias',
'module.v.head_dist.weight',
'module.v.head_dist.bias',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def lowercase (SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False ) -> Optional[int]:
SCREAMING_SNAKE_CASE = get_audio_spectrogram_transformer_config(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
# load original state_dict
SCREAMING_SNAKE_CASE = model_name_to_url[model_name]
SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location='cpu' )
# remove some keys
remove_keys(SCREAMING_SNAKE_CASE_ )
# rename some keys
SCREAMING_SNAKE_CASE = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load 🤗 model
SCREAMING_SNAKE_CASE = ASTForAudioClassification(SCREAMING_SNAKE_CASE_ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
SCREAMING_SNAKE_CASE = -4.2_67_73_93 if 'speech-commands' not in model_name else -6.84_59_78
SCREAMING_SNAKE_CASE = 4.5_68_99_74 if 'speech-commands' not in model_name else 5.5_65_45_26
SCREAMING_SNAKE_CASE = 10_24 if 'speech-commands' not in model_name else 1_28
SCREAMING_SNAKE_CASE = ASTFeatureExtractor(mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
if "speech-commands" in model_name:
SCREAMING_SNAKE_CASE = load_dataset('speech_commands' , 'v0.02' , split='validation' )
SCREAMING_SNAKE_CASE = dataset[0]['audio']['array']
else:
SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torchaudio.load(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = waveform.squeeze().numpy()
SCREAMING_SNAKE_CASE = feature_extractor(SCREAMING_SNAKE_CASE_ , sampling_rate=1_60_00 , return_tensors='pt' )
# forward pass
SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
SCREAMING_SNAKE_CASE = torch.tensor([-0.87_60, -7.00_42, -8.66_02] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
SCREAMING_SNAKE_CASE = torch.tensor([-1.19_86, -7.09_03, -8.27_18] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
SCREAMING_SNAKE_CASE = torch.tensor([-2.61_28, -8.00_80, -9.43_44] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
SCREAMING_SNAKE_CASE = torch.tensor([-1.50_80, -7.45_34, -8.89_17] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
SCREAMING_SNAKE_CASE = torch.tensor([-0.50_50, -6.58_33, -8.08_43] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
SCREAMING_SNAKE_CASE = torch.tensor([-0.38_26, -7.03_36, -8.24_13] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
SCREAMING_SNAKE_CASE = torch.tensor([-1.21_13, -6.91_01, -8.34_70] )
elif model_name == "ast-finetuned-speech-commands-v2":
SCREAMING_SNAKE_CASE = torch.tensor([6.15_89, -8.05_66, -8.79_84] )
else:
raise ValueError('Unknown model name' )
if not torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ):
raise ValueError('Logits don\'t match' )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F'Saving feature extractor to {pytorch_dump_folder_path}' )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print('Pushing model and feature extractor to the hub...' )
model.push_to_hub(F'MIT/{model_name}' )
feature_extractor.push_to_hub(F'MIT/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__UpperCamelCase = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
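# Example invocation (illustrative; the script filename and output path are assumptions):
#   python convert_audio_spectrogram_transformer_original_to_pytorch.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-audioset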
| 113 | 1 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 309 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class a ( unittest.TestCase ):
def __init__( self , _snake_case , _snake_case=7 , _snake_case=3 , _snake_case=18 , _snake_case=30 , _snake_case=4_00 , _snake_case=True , _snake_case=None , _snake_case=True , _snake_case=None , _snake_case=True , ):
"""simple docstring"""
lowerCAmelCase = size if size is not None else {'shortest_edge': 20}
lowerCAmelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18}
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = num_channels
lowerCAmelCase = image_size
lowerCAmelCase = min_resolution
lowerCAmelCase = max_resolution
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = do_center_crop
lowerCAmelCase = crop_size
lowerCAmelCase = do_flip_channel_order
def UpperCamelCase__ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class a ( a__ , unittest.TestCase ):
snake_case__ = MobileViTImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = MobileViTImageProcessingTester(self )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case , 'do_resize' ) )
self.assertTrue(hasattr(_snake_case , 'size' ) )
self.assertTrue(hasattr(_snake_case , 'do_center_crop' ) )
self.assertTrue(hasattr(_snake_case , 'center_crop' ) )
self.assertTrue(hasattr(_snake_case , 'do_flip_channel_order' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , Image.Image )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , np.ndarray )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , torch.Tensor )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 309 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
_A = logging.get_logger(__name__)
class lowerCamelCase ( snake_case_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['''pixel_values''']
def __init__(self , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = True , _lowerCamelCase = 1 / 255 , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = True , **_lowerCamelCase , ):
"""simple docstring"""
super().__init__(**__snake_case )
UpperCAmelCase__ : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224}
UpperCAmelCase__ : List[str] = get_size_dict(__snake_case , default_to_square=__snake_case )
UpperCAmelCase__ : List[Any] = crop_size if crop_size is not None else {"""height""": 256, """width""": 256}
UpperCAmelCase__ : Any = get_size_dict(__snake_case , param_name="""crop_size""" )
UpperCAmelCase__ : List[Any] = do_resize
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : List[str] = resample
UpperCAmelCase__ : str = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : Optional[int] = do_center_crop
UpperCAmelCase__ : Optional[Any] = crop_size
UpperCAmelCase__ : Union[str, Any] = do_flip_channel_order
# Fragment of an image-processor class. The class header below is reconstructed
# (the name is inferred from the BGR channel flip, which MobileViT checkpoints
# use); the helpers it relies on (get_size_dict, get_resize_output_image_size,
# resize, center_crop, rescale, flip_channel_order, make_list_of_images,
# valid_images, to_numpy_array, to_channel_dimension_format, ChannelDimension,
# BatchFeature, is_torch_tensor, PIL, torch) are imported earlier in the
# original file.
class MobileViTImageProcessor(BaseImageProcessor):
    def resize(self, image, size, resample=PIL.Image.BILINEAR, data_format=None, **kwargs):
        """Resize `image` so that its shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop `image` to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image, data_format=None):
        """Flip the channel order of the image (RGB <-> BGR)."""
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_center_crop=None,
        crop_size=None,
        do_flip_channel_order=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        """Turn model logits into per-image semantic segmentation maps."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
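# Usage sketch (not from the original file; the checkpoint name and call
# sequence are illustrative assumptions):
#
#     from PIL import Image
#     from transformers import MobileViTForSemanticSegmentation
#
#     image = Image.open("photo.jpg")
#     processor = MobileViTImageProcessor()
#     model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small")
#     inputs = processor.preprocess(image, return_tensors="pt")
#     outputs = model(**inputs)
#     maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])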
| 171 |
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
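# Usage sketch: this is the packaged "pandas" loader, which reads pickled
# DataFrames (the file name below is illustrative):
#
#     import pandas as pd
#     from datasets import load_dataset
#
#     pd.DataFrame({"text": ["a", "b"]}).to_pickle("train.pkl")
#     ds = load_dataset("pandas", data_files={"train": "train.pkl"})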
| 70 | 0 |
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape (k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask sizes
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
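# Shape check (a sketch, not part of the original demo): the filter does not
# pad its input, so a k_size x k_size kernel shrinks each spatial dimension by
# k_size - 1:
#
#     >>> from numpy import random
#     >>> out = gaussian_filter((random.rand(32, 32) * 255).astype("uint8"), 3, sigma=1)
#     >>> out.shape
#     (30, 30)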
| 371 |
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 16 | 0 |
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
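# Behavior sketch (an assumption about how _LazyModule is used here, mirroring
# the standard transformers pattern): the torch-only symbols are resolved only
# on first attribute access, so e.g.
#
#     from transformers.models.gpt_neox_japanese import GPTNeoXJapaneseModel
#
# imports `modeling_gpt_neox_japanese` at that point, not at package import time.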
| 107 |
from dataclasses import dataclass, field
from typing import Optional

from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser


@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization."
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization."
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )


def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicitly specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicitly specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)


if __name__ == "__main__":
    main()
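# Example invocation (a sketch; the script name, checkpoints and output path
# are illustrative, not taken from the original file):
#
#     python create_model_from_encoder_decoder_models.py \
#         --output_dir ./vit-gpt2 \
#         --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#         --decoder_model_name_or_path gpt2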
| 107 | 1 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
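# Minimal usage sketch (assumes this config module is importable as part of
# transformers):
#
#     config = LevitConfig(hidden_sizes=[128, 256, 384])
#     onnx_config = LevitOnnxConfig(config)
#     print(onnx_config.inputs)  # OrderedDict([('pixel_values', {0: 'batch', ...})])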
| 165 |
from math import pi


def arc_length(angle: int, radius: int) -> float:
    """Return the length of a circular arc spanning `angle` degrees."""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
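# Worked value: arc_length(90, 10) = 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.7079632679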
| 165 | 1 |
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
| 341 |
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
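# Note (inferred from the recurrence, not stated in the file): this sums the
# perimeters of almost-equilateral Heronian triangles (sides n, n, n +/- 1 with
# integral area) up to max_perimeter, which matches Project Euler problem 94.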
if __name__ == "__main__":
    print(f"{solution() = }")
| 341 | 1 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowercase_ = logging.get_logger(__name__)
def lowercase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] ) -> Tuple:
def constraint_to_multiple_of(lowerCAmelCase__ : int , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any]=0 , lowerCAmelCase__ : Optional[Any]=None ):
__a = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
__a = math.floor(val / multiple ) * multiple
if x < min_val:
__a = math.ceil(val / multiple ) * multiple
return x
__a = (output_size, output_size) if isinstance(_A , _A ) else output_size
__a = get_image_size(_A )
__a = output_size
# determine new height and width
__a = output_height / input_height
__a = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
__a = scale_width
else:
# fit height
__a = scale_height
__a = constraint_to_multiple_of(scale_height * input_height , multiple=_A )
__a = constraint_to_multiple_of(scale_width * input_width , multiple=_A )
return (new_height, new_width)
class __lowerCAmelCase ( lowerCamelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = ['pixel_values']
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = False , _a = 1 , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = None , **_a , ):
super().__init__(**__snake_case )
__a = size if size is not None else {'height': 384, 'width': 384}
__a = get_size_dict(__snake_case )
__a = do_resize
__a = size
__a = keep_aspect_ratio
__a = ensure_multiple_of
__a = resample
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__a = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCAmelCase ( self , _a , _a , _a = False , _a = 1 , _a = PILImageResampling.BICUBIC , _a = None , **_a , ):
__a = get_size_dict(__snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
__a = get_resize_output_image_size(
__snake_case , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=__snake_case , multiple=__snake_case , )
return resize(__snake_case , size=__snake_case , resample=__snake_case , data_format=__snake_case , **__snake_case )
def __UpperCAmelCase ( self , _a , _a , _a = None , **_a , ):
return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case )
def __UpperCAmelCase ( self , _a , _a , _a , _a = None , **_a , ):
return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case )
def __UpperCAmelCase ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = size if size is not None else self.size
__a = get_size_dict(__snake_case )
__a = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
__a = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
__a = resample if resample is not None else self.resample
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__a = [to_numpy_array(__snake_case ) for image in images]
if do_resize:
__a = [self.resize(image=__snake_case , size=__snake_case , resample=__snake_case ) for image in images]
if do_rescale:
__a = [self.rescale(image=__snake_case , scale=__snake_case ) for image in images]
if do_normalize:
__a = [self.normalize(image=__snake_case , mean=__snake_case , std=__snake_case ) for image in images]
__a = [to_channel_dimension_format(__snake_case , __snake_case ) for image in images]
__a = {'pixel_values': images}
return BatchFeature(data=__snake_case , tensor_type=__snake_case )
def __UpperCAmelCase ( self , _a , _a = None ):
__a = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__snake_case ) != len(__snake_case ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(__snake_case ):
__a = target_sizes.numpy()
__a = []
for idx in range(len(__snake_case ) ):
__a = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=__snake_case )
__a = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__snake_case )
else:
__a = logits.argmax(dim=1 )
__a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
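# Minimal preprocessing sketch (illustrative; assumes PIL and a 3-channel image):
#
#     from PIL import Image
#     processor = DPTImageProcessor(size={"height": 384, "width": 384})
#     batch = processor.preprocess(Image.open("photo.jpg"), return_tensors="pt")
#     batch["pixel_values"].shape  # torch.Size([1, 3, 384, 384])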
| 357 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a , _a , _a ):
self.assertEqual(len(_a ) , len(_a ) )
for a, b in zip(_a , _a ):
self.assertAlmostEqual(_a , _a , delta=_a )
def __UpperCAmelCase ( self ):
__a = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(_a ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def __UpperCAmelCase ( self ):
__a = None
ops.enable_eager_execution_internal()
__a = tf.config.list_physical_devices('''CPU''' )
if len(_a ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
__a = tf.config.list_logical_devices(device_type='''CPU''' )
__a = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
__a = GradientAccumulator()
__a = tf.Variable([4.0, 3.0] )
__a , __a = create_optimizer(5E-5 , 10 , 5 )
__a = tf.Variable([0.0, 0.0] , trainable=_a )
def accumulate_on_replica(_a ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(_a , _a ):
with strategy.scope():
__a = strategy.experimental_local_results(_a )
local_variables[0].assign(_a )
local_variables[1].assign(_a )
strategy.run(_a , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(_a )
def _check_local_values(_a , _a ):
__a = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , _a , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , _a , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 11 | 0 |
from typing import List

import jiwer
import jiwer.transforms as tr
from packaging import version

import datasets
from datasets.config import PY_VERSION


if PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


SENTENCE_DELIMITER = ""


if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )


_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.

CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.

Character error rate can be computed as:

CER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).

CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
    references: list of references for each speech input.
    predictions: list of transcriptions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
    (float): the character error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> cer = datasets.load_metric("cer")
    >>> cer_score = cer.compute(predictions=predictions, references=references)
    >>> print(cer_score)
    0.34146341463414637
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
| 309 |
import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image

from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))

    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size


@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original model's weights into the Transformers design."""
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
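# Example invocation (illustrative; the output path is arbitrary):
#
#     python convert_blip_2_original_to_pytorch.py \
#         --model_name blip2-opt-2.7b \
#         --pytorch_dump_folder_path ./blip2-opt-2.7b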
| 309 | 1 |
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # strings stay strings, bare flags become booleans, and numbers are parsed
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
| 139 |
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
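# Worked example: median_of_two_arrays([-1.0, 3.0], [2.0]) merges to
# [-1.0, 2.0, 3.0]; the length is odd, so the median is the middle element, 2.0.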
| 139 | 1 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a : Tuple = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = ReformerTokenizer
_UpperCamelCase : Tuple = ReformerTokenizerFast
_UpperCamelCase : Tuple = True
_UpperCamelCase : int = False
_UpperCamelCase : Union[str, Any] = True
def __A ( self ):
super().setUp()
_lowerCAmelCase : Any = ReformerTokenizer(a__ , keep_accents=a__ )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : Optional[int] = """<s>"""
_lowerCAmelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __A ( self ):
_lowerCAmelCase : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(a__ ) , 1000 )
def __A ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Dict = self.get_rust_tokenizer()
_lowerCAmelCase : Any = """I was born in 92000, and this is falsé."""
_lowerCAmelCase : Any = tokenizer.tokenize(a__ )
_lowerCAmelCase : Dict = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : int = tokenizer.encode(a__ , add_special_tokens=a__ )
_lowerCAmelCase : List[Any] = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
_lowerCAmelCase : str = tokenizer.encode(a__ )
_lowerCAmelCase : Dict = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
def __A ( self , a__=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
_lowerCAmelCase : List[str] = """This is a simple input"""
_lowerCAmelCase : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : List[Any] = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : Optional[Any] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
def __A ( self ):
pass
def __A ( self ):
_lowerCAmelCase : Optional[Any] = ReformerTokenizer(a__ , keep_accents=a__ )
_lowerCAmelCase : Any = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(a__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [285, 46, 10, 170, 382] , )
_lowerCAmelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_lowerCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(
a__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def __A ( self ):
return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" )
@slow
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = """Hello World!"""
_lowerCAmelCase : Tuple = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __A ( self ):
_lowerCAmelCase : Dict = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
_lowerCAmelCase : Union[str, Any] = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        ]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
| 44 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Dict = TFAutoModel.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = AutoModel.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Dict = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : str = TFAutoModelForPreTraining.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Optional[Any] = AutoModelForPreTraining.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Any = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = TFAutoModelForCausalLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ , lowercase__ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ , lowercase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Any = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ , lowercase__ : str = TFAutoModelForMaskedLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = AutoModelForMaskedLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ , lowercase__ : Any = AutoModelForMaskedLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ , lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ , lowercase__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = TFAutoModelForSequenceClassification.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : List[Any] = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : str = TFAutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
    def test_from_pretrained_identifier( self ) -> None:
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER ,from_pt=True )
        self.assertIsInstance(model ,TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() ,14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) ,14_410 )
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER ,from_tf=True )
        self.assertIsInstance(model ,BertForMaskedLM )
        self.assertEqual(model.num_parameters() ,14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) ,14_410 )

    def test_from_identifier_from_model_type( self ) -> None:
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER ,from_pt=True )
        self.assertIsInstance(model ,TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() ,14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) ,14_410 )
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER ,from_tf=True )
        self.assertIsInstance(model ,RobertaForMaskedLM )
        self.assertEqual(model.num_parameters() ,14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) ,14_410 )
| 16 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowercase (PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = KandinskyVaaControlnetPipeline
    params = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    batch_params = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    required_optional_params = [
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    test_xformers_attention = False
@property
def UpperCAmelCase ( self ) -> Any:
return 3_2
@property
def UpperCAmelCase ( self ) -> Union[str, Any]:
return 3_2
@property
def UpperCAmelCase ( self ) -> List[str]:
return self.time_input_dim
@property
def UpperCAmelCase ( self ) -> str:
return self.time_input_dim * 4
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
return 1_0_0
@property
def UpperCAmelCase ( self ) -> Any:
torch.manual_seed(0 )
snake_case : Any = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case : Optional[int] = UNetaDConditionModel(**_a )
return model
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
snake_case : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Union[str, Any] = self.dummy_unet
snake_case : Optional[Any] = self.dummy_movq
snake_case : Any = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="""linear""" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=_a , set_alpha_to_one=_a , steps_offset=1 , prediction_type="""epsilon""" , thresholding=_a , )
snake_case : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def UpperCAmelCase ( self , A , A=0 ) -> Optional[Any]:
snake_case : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
snake_case : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
# create hint
snake_case : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith("""mps""" ):
snake_case : Optional[Any] = torch.manual_seed(_a )
else:
snake_case : Tuple = torch.Generator(device=_a ).manual_seed(_a )
snake_case : Tuple = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def UpperCAmelCase ( self ) -> str:
snake_case : str = """cpu"""
snake_case : Any = self.get_dummy_components()
snake_case : str = self.pipeline_class(**_a )
snake_case : Optional[int] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
snake_case : Dict = pipe(**self.get_dummy_inputs(_a ) )
snake_case : Optional[int] = output.images
snake_case : Any = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
snake_case : int = image[0, -3:, -3:, -1]
snake_case : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case : Tuple = np.array(
[0.6_95_98_26, 0.86_82_79, 0.7_55_80_92, 0.68_76_94_67, 0.85_80_58_04, 0.65_97_74_96, 0.44_88_53_02, 0.5_95_91_11, 0.4_25_15_95] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ) -> str:
snake_case : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
snake_case : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
snake_case : int = torch.from_numpy(np.array(_a ) ).float() / 2_55.0
snake_case : List[Any] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
snake_case : Optional[Any] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_a )
snake_case : Optional[Any] = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
snake_case : Dict = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
snake_case : Tuple = """A robot, 4k photo"""
snake_case : Dict = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case : List[Any] = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
snake_case : int = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case : Union[str, Any] = pipeline(
image_embeds=_a , negative_image_embeds=_a , hint=_a , generator=_a , num_inference_steps=1_0_0 , output_type="""np""" , )
snake_case : List[Any] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(_a , _a )
| 362 |
import warnings
from functools import wraps
from typing import Callable
def SCREAMING_SNAKE_CASE__ ( fn ) -> Callable:
    @wraps(fn )
    def _inner_fn(*args , **kwargs ):
        # UserWarning is an assumption here; the original warning category was lost in obfuscation
        warnings.warn(
            (f"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , UserWarning , )
        return fn(*args , **kwargs )

    return _inner_fn
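# Minimal usage sketch (the decorated function below is hypothetical): every call
# to the wrapped function emits the warning, while @wraps preserves the original
# __name__ used in the message.
#
# @SCREAMING_SNAKE_CASE__
# def fetch_data(url):
#     ...
#
# fetch_data("https://example.com")  # UserWarning: 'fetch_data' is experimental ...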
| 176 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 165 |
"""simple docstring"""
def solution( snake_case__ = 10_00 ):
    '''Return the index of the first Fibonacci term with `snake_case__` digits.'''
    fa , fb = 1, 1
    index = 2
    while True:
        i = 0
        f = fa + fb
        fa , fb = fb, f
        index += 1
        for _ in str(f ):
            i += 1
        if i == snake_case__:
            break
    return index
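# Quick sanity check: the Fibonacci sequence runs 1, 1, 2, 3, 5, 8, 13, 21, 34,
# 55, 89, 144, so the first term with 3 digits is F(12) = 144.
assert solution(3 ) == 12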
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 165 | 1 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
__A = parser.add_argument_group("quant_trainer arguments" )
group.add_argument("--wprec" , type=a_ , default=8 , help="weight precision" )
group.add_argument("--aprec" , type=a_ , default=8 , help="activation precision" )
group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
group.add_argument("--quant-disable-keyword" , type=a_ , nargs="+" , help="disable quantizers by keyword" )
group.add_argument("--quant-disable-layer-module" , type=a_ , help="disable quantizers by keyword under layer." )
group.add_argument("--quant-enable-layer-module" , type=a_ , help="enable quantizers by keyword under layer" )
group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
group.add_argument("--percentile" , default=a_ , type=a_ , help="percentile for PercentileCalibrator" )
group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
group.add_argument("--clip-gelu" , metavar="N" , type=a_ , help="clip gelu output maximum value to N" )
group.add_argument(
"--recalibrate-weights" , action="store_true" , help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
) , )
def UpperCAmelCase ( a_ ) -> Tuple:
"""simple docstring"""
if args.calibrator == "max":
__A = "max"
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("Specify --percentile when using percentile calibrator" )
__A = "histogram"
elif args.calibrator == "mse":
__A = "histogram"
else:
raise ValueError(F'''Invalid calibrator {args.calibrator}''' )
__A = QuantDescriptor(num_bits=args.aprec , calib_method=a_ )
__A = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(a_ )
quant_nn.QuantLinear.set_default_quant_desc_weight(a_ )
def UpperCAmelCase ( a_ , a_ , a_=False , a_=False ) -> Optional[Any]:
"""simple docstring"""
logger.info("Configuring Model for Quantization" )
logger.info(F'''using quantization package {pytorch_quantization.__file__}''' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(a_ , ["embeddings"] , which="weight" , _disabled=a_ )
if args.quant_disable:
set_quantizer_by_name(a_ , [""] , _disabled=a_ )
if args.quant_disable_keyword:
set_quantizer_by_name(a_ , args.quant_disable_keyword , _disabled=a_ )
if args.quant_disable_layer_module:
set_quantizer_by_name(a_ , [r"layer.\d+." + args.quant_disable_layer_module] , _disabled=a_ )
if args.quant_enable_layer_module:
set_quantizer_by_name(a_ , [r"layer.\d+." + args.quant_enable_layer_module] , _disabled=a_ )
if args.recalibrate_weights:
recalibrate_weights(a_ )
if args.fuse_qkv:
fuse_qkv(a_ , a_ )
if args.clip_gelu:
clip_gelu(a_ , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(a_ )
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
logger.info("Enabling Calibration" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F'''{name:80}: {module}''' )
def UpperCAmelCase ( a_ , a_ ) -> List[str]:
"""simple docstring"""
logger.info("Loading calibrated amax" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("percentile" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(a_ )
def UpperCAmelCase ( a_ , a_ ) -> List[Any]:
"""simple docstring"""
def fusea(a_ , a_ , a_ ):
for mod in [qq, qk, qv]:
if not hasattr(a_ , "_amax" ):
print(" WARNING: NO AMAX BUFFER" )
return
__A = qq._amax.detach().item()
__A = qk._amax.detach().item()
__A = qv._amax.detach().item()
__A = max(a_ , a_ , a_ )
qq._amax.fill_(a_ )
qk._amax.fill_(a_ )
qv._amax.fill_(a_ )
logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
for name, mod in model.named_modules():
if name.endswith(".attention.self" ):
logger.info(F'''FUSE_QKV: {name:{name_width}}''' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def UpperCAmelCase ( a_ , a_ ) -> List[str]:
"""simple docstring"""
for name, mod in model.named_modules():
if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
__A = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=a_ )
__A = mod._input_quantizer._amax.data.detach().item()
logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(a_ , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
__A = mod.weight.shape[0]
__A = mod._weight_quantizer._amax.detach()
__A = torch.ones(a_ , dtype=amax.dtype , device=amax.device ) * amax
print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def recalibrate_weights(model ) -> None:
    """Recalibrate weight amaxes by taking the max of the current weights."""
    for name, mod in model.named_modules():
        if hasattr(mod , "_weight_quantizer" ):
            if not hasattr(mod._weight_quantizer , "_amax" ):
                print(F'''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
            mod._weight_quantizer._amax = amax
def UpperCAmelCase ( a_ , a_=2_5 , a_=1_8_0 , a_=None ) -> List[Any]:
"""simple docstring"""
if ignore is None:
__A = []
elif not isinstance(a_ , a_ ):
__A = [ignore]
__A = 0
for name, mod in model.named_modules():
if not hasattr(a_ , "weight" ):
continue
__A = max(a_ , len(a_ ) )
for name, mod in model.named_modules():
__A = getattr(a_ , "_input_quantizer" , a_ )
__A = getattr(a_ , "_weight_quantizer" , a_ )
if not hasattr(a_ , "weight" ):
continue
if type(a_ ) in ignore:
continue
if [True for s in ignore if type(a_ ) is str and s in name]:
continue
__A = F'''Act:{input_q.extra_repr()}'''
__A = F'''Wgt:{weight_q.extra_repr()}'''
__A = F'''{name:{name_width}} {act_str} {wgt_str}'''
if len(a_ ) <= line_width:
logger.info(a_ )
else:
logger.info(F'''{name:{name_width}} {act_str}''' )
logger.info(F'''{' ':{name_width}} {wgt_str}''' )
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
__A = 0
for name, mod in model.named_modules():
if isinstance(a_ , pytorch_quantization.nn.TensorQuantizer ):
print(F'''{name:80} {mod}''' )
count += 1
print(F'''{count} TensorQuantizers found in model''' )
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ ) -> Optional[int]:
"""simple docstring"""
__A = getattr(a_ , a_ , a_ )
if quantizer_mod is not None:
assert hasattr(a_ , a_ )
setattr(a_ , a_ , a_ )
else:
logger.warning(F'''{name} has no {quantizer}''' )
def UpperCAmelCase ( a_ , a_ , a_="both" , **a_ ) -> Dict:
"""simple docstring"""
__A = F'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += F''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(a_ , a_ , "_input_quantizer" , a_ , a_ )
if which in ["weight", "both"]:
set_quantizer(a_ , a_ , "_weight_quantizer" , a_ , a_ )
logger.info(a_ )
def UpperCAmelCase ( a_ , a_ , **a_ ) -> Optional[Any]:
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(a_ , "_input_quantizer" ) or hasattr(a_ , "_weight_quantizer" ):
for n in names:
if re.search(a_ , a_ ):
set_quantizers(a_ , a_ , **a_ )
elif name.endswith("_quantizer" ):
for n in names:
if re.search(a_ , a_ ):
__A = F'''Warning: changing {name:{name_width}}'''
for k, v in kwargs.items():
s += F''' {k}={v}'''
setattr(a_ , a_ , a_ )
logger.info(a_ )
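# Typical calibration flow with the helpers above (sketch only; the call names
# follow the upstream transformers `quant_trainer` module and are an assumption
# here, since most defs in this row were renamed by the obfuscation):
#
#   set_default_quantizers(args)              # pick calibrators before building the model
#   configure_model(model, args, calib=True)  # wire up / selectively disable quantizers
#   enable_calibration(model)                 # collect statistics
#   ...run forward passes on calibration data...
#   finish_calibration(model, args)           # load amaxes and re-enable quantization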
| 352 |
def generate_large_matrix() -> list[list[int]]:
    """Generate a 1000x1000 grid sorted in decreasing order along rows and columns."""
    return [list(range(1_0_0_0 - i , -1_0_0_0 - i , -1 ) ) for i in range(1_0_0_0 )]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]] ) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )


def find_negative_index(array: list[int] ) -> int:
    """Find the index of the first negative number with binary search."""
    left = 0
    right = len(array ) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )


def count_negatives_binary_search(grid: list[list[int]] ) -> int:
    """Count negative values using binary search on each (sorted) row."""
    total = 0
    bound = len(grid[0] )
    for i in range(len(grid ) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid ) * len(grid[0] )) - total


def count_negatives_brute_force(grid: list[list[int]] ) -> int:
    """Count negative values with a flat brute-force scan."""
    return len([number for row in grid for number in row if number < 0] )


def count_negatives_brute_force_with_break(grid: list[list[int]] ) -> int:
    """Count negative values, breaking out of each sorted row early."""
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three implementations against each other."""
    from timeit import timeit

    print("Running benchmarks" )
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(F'''{func}(grid=grid)''' , setup=setup , number=5_0_0 )
        print(F'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
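# Worked example for the first test grid above: [[4, 3, 2, -1], [3, 2, 1, -1],
# [1, 1, -1, -2], [-1, -1, -2, -3]] holds 1 + 1 + 2 + 4 = 8 negatives, so all
# three counting functions return 8 for it.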
| 124 | 0 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
    def analyze_directory( self , directory: Path , identifier: Union[str, None] = None , n_identifier: Union[str, List[str], None] = None , ignore_files: Union[str, List[str], None] = None , only_modules: bool = True , ):
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier , list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append('__init__.py' )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing' , file )
            if only_modules:
                module_identifier = file.split('.' )[0]
                try:
                    module = getattr(transformers , module_identifier )
                    suite = doctest.DocTestSuite(module )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(F'''{module_identifier} is not a module.''' )
            else:
                result = doctest.testfile(str('..' / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def test_modeling_examples( self ):
        transformers_directory = Path('src/transformers' )
        identifier = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(transformers_directory , identifier=identifier , ignore_files=ignore_files )

    def test_tokenization_examples( self ):
        transformers_directory = Path('src/transformers' )
        identifier = 'tokenization'
        self.analyze_directory(transformers_directory , identifier=identifier )

    def test_configuration_examples( self ):
        transformers_directory = Path('src/transformers' )
        identifier = 'configuration'
        self.analyze_directory(transformers_directory , identifier=identifier )

    def test_no_fail_cases( self ):
        transformers_directory = Path('src/transformers' )
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(transformers_directory , n_identifier=n_identifiers )

    def test_documentation_examples( self ):
        doc_source_directory = Path('docs/source' )
        ignore_files = ['favicon.ico']
        self.analyze_directory(doc_source_directory , ignore_files=ignore_files , only_modules=False )
| 96 |
def method_a(boundary , steps ):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + 2f(n-1) + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a , b , h )
    y = 0.0
    y += (h / 2.0) * f(a )
    for i in x_i:
        # print(i)
        y += h * f(i )
    y += (h / 2.0) * f(b )
    return y


def make_points(a , b , h ):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x ):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary , steps )
    print(f"y = {y}" )
if __name__ == "__main__":
main()
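# Sanity check: with f(x) = x**2 on [0, 1] the exact integral is 1/3, so the
# printed y should land near 0.333 and approach it as `steps` increases
# (the trapezoidal rule slightly overestimates convex integrands).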
| 11 | 0 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""

    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )

    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
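# The expected split above follows the tiny merges file: "adapt" is a full vocab
# entry; "react" stops merging after "re@@", leaving the character-level pieces
# "re@@ a@@ c@@ t"; and the appended unknown token maps to "<unk>" (id 6), giving
# ids [0, 1, 2, 4, 5, 1, 0, 3, 6].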
| 297 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
@property
def A__ ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def A__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(UpperCAmelCase )
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
        def extract(*args , **kwargs ):
            class Out:
                """Dummy feature-extractor output with a `.to(device)` passthrough."""

                def __init__( self ):
                    self.pixel_values = torch.ones([0] )

                def to( self , device ):
                    self.pixel_values.to(device )
                    return self

            return Out()

        return extract
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.dummy_cond_unet
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase_ = 77
lowercase_ = self.dummy_image.to(UpperCAmelCase )
lowercase_ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowercase_ = AltDiffusionImgaImgPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase )
lowercase_ = alt_pipe.to(UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = "A painting of a squirrel eating a burger"
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , )
lowercase_ = output.images
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , return_dict=UpperCAmelCase , )[0]
lowercase_ = image[0, -3:, -3:, -1]
lowercase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.dummy_cond_unet
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase_ = 77
lowercase_ = self.dummy_image.to(UpperCAmelCase )
# put models in fp16
lowercase_ = unet.half()
lowercase_ = vae.half()
lowercase_ = bert.half()
# make sure here that pndm scheduler skips prk
lowercase_ = AltDiffusionImgaImgPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase )
lowercase_ = alt_pipe.to(UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = "A painting of a squirrel eating a burger"
lowercase_ = torch.manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase_ = init_image.resize((760, 504) )
lowercase_ = "BAAI/AltDiffusion"
lowercase_ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCAmelCase , safety_checker=UpperCAmelCase , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase_ = "A fantasy landscape, trending on artstation"
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase , output_type="np" , )
lowercase_ = output.images[0]
lowercase_ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowercase_ = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
lowercase_ = init_image.resize((768, 512) )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
lowercase_ = "BAAI/AltDiffusion"
lowercase_ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCAmelCase , safety_checker=UpperCAmelCase , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase_ = "A fantasy landscape, trending on artstation"
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase , output_type="np" , )
lowercase_ = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 297 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class _snake_case ( PretrainedConfig ):
    model_type = '''deformable_detr'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=300 , max_position_embeddings=1_024 , encoder_layers=6 , encoder_ffn_dim=1_024 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1_024 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , num_feature_levels=4 , encoder_n_points=4 , decoder_n_points=4 , two_stage=False , two_stage_num_proposals=300 , with_box_refine=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , disable_custom_kernels=False , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder ,**kwargs )

    @property
    def num_attention_heads( self ) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size( self ) -> int:
        return self.d_model

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
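# Minimal usage sketch (keyword values are illustrative, not prescribed):
#   config = _snake_case(num_queries=300, two_stage=True, with_box_refine=True)
# builds a valid config, while two_stage=True combined with with_box_refine=False
# raises a ValueError, as enforced in __init__ above.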
| 139 |
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length( fnc , x_start , x_end , steps = 100 , ):
    '''simple docstring'''
    x1 = x_start
    fx1 = fnc(x_start )
    length = 0.0
    for _ in range(steps ):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2 )
        length += math.hypot(x2 - x1 , fx2 - fx1 )
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
return length
if __name__ == "__main__":
    def f( x ):
        return math.sin(10 * x )
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
A_ = 10
while i <= 10_00_00:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
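# Sanity check: for the straight line g(x) = x on [0, 1] the exact length is
# sqrt(2) ≈ 1.41421, and line_length(lambda x: x, 0, 1) matches it regardless of
# `steps`, since every approximating segment lies on the curve itself.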
| 139 | 1 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform( number_of_qubits = 3 ) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits , str ):
        raise TypeError("number of qubits must be a integer." )
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0." )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer." )
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10)." )
    qr = QuantumRegister(number_of_qubits , "qr" )
    cr = ClassicalRegister(number_of_qubits , "cr" )
    quantum_circuit = QuantumCircuit(qr , cr )
    counter = number_of_qubits
    for i in range(number_of_qubits ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator" )
    job = execute(quantum_circuit , backend , shots=1_0000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
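# Expected behaviour: the register starts in |000>, and the QFT of the all-zeros
# state is the uniform superposition, so the 10000 shots should spread roughly
# evenly over all 2**3 = 8 bitstrings (about 1250 counts each).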
| 140 |
import os
def solution() -> int:
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , "triangle.txt" )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" " ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number_a = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number_b = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_a , number_b )
    return max(a[-1] )
if __name__ == "__main__":
print(solution())
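# Worked example of the same DP on the small triangle from Project Euler 18:
# [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]] gives a best path of 3 + 7 + 4 + 9 = 23,
# since each a[i][j] accumulates the larger of its two parents before the final
# max over the bottom row.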
| 140 | 1 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class snake_case ( ProcessorMixin ):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__( self , image_processor , tokenizer , qformer_tokenizer ):
        super().__init__(image_processor , tokenizer )
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
def __call__( self : str , __A : ImageInput = None , __A : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __A : bool = True , __A : Union[bool, str, PaddingStrategy] = False , __A : Union[bool, str, TruncationStrategy] = None , __A : Optional[int] = None , __A : int = 0 , __A : Optional[int] = None , __A : Optional[bool] = None , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = True , __A : Optional[Union[str, TensorType]] = None , **__A : List[Any] , ):
if images is None and text is None:
raise ValueError('You have to specify at least images or text.' )
__UpperCamelCase = BatchFeature()
if text is not None:
__UpperCamelCase = self.tokenizer(
text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
encoding.update(__A )
__UpperCamelCase = self.qformer_tokenizer(
text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
__UpperCamelCase = qformer_text_encoding.pop('input_ids' )
__UpperCamelCase = qformer_text_encoding.pop('attention_mask' )
if images is not None:
__UpperCamelCase = self.image_processor(__A , return_tensors=__A )
encoding.update(__A )
return encoding
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    def save_pretrained( self , save_directory , **kwargs ):
        if os.path.isfile(save_directory ):
            raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
        os.makedirs(save_directory , exist_ok=True )
        qformer_tokenizer_path = os.path.join(save_directory , 'qformer_tokenizer' )
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path )
        return super().save_pretrained(save_directory , **kwargs )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path , subfolder='qformer_tokenizer' )
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path , **kwargs )
        args.append(qformer_tokenizer )
        return cls(*args )
| 53 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__snake_case = logging.get_logger(__name__)
__snake_case = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all BART models at https://huggingface.co/models?filter=bart
__snake_case = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
},
}
__snake_case = {
"""facebook/bart-base""": 10_24,
"""facebook/bart-large""": 10_24,
"""facebook/bart-large-mnli""": 10_24,
"""facebook/bart-large-cnn""": 10_24,
"""facebook/bart-large-xsum""": 10_24,
"""yjernite/bart_eli5""": 10_24,
}
class lowercase__ ( _UpperCAmelCase ):
A__ : Tuple =VOCAB_FILES_NAMES
A__ : Any =PRETRAINED_VOCAB_FILES_MAP
A__ : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Tuple =["""input_ids""", """attention_mask"""]
A__ : Optional[int] =BartTokenizer
def __init__( self : str , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]="replace" , UpperCAmelCase_ : int="<s>" , UpperCAmelCase_ : List[Any]="</s>" , UpperCAmelCase_ : int="</s>" , UpperCAmelCase_ : Tuple="<s>" , UpperCAmelCase_ : Any="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Optional[Any]="<mask>" , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : List[str]=True , **UpperCAmelCase_ : List[Any] , ):
super().__init__(
UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ , **UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , UpperCAmelCase_ ) != add_prefix_space:
SCREAMING_SNAKE_CASE__ = getattr(UpperCAmelCase_ , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE__ = add_prefix_space
SCREAMING_SNAKE_CASE__ = pre_tok_class(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE__ = 'post_processor'
SCREAMING_SNAKE_CASE__ = getattr(self.backend_tokenizer , UpperCAmelCase_ , UpperCAmelCase_ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE__ = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE__ = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE__ = tuple(state['cls'] )
SCREAMING_SNAKE_CASE__ = False
if state.get('add_prefix_space' , UpperCAmelCase_ ) != add_prefix_space:
SCREAMING_SNAKE_CASE__ = add_prefix_space
SCREAMING_SNAKE_CASE__ = True
if state.get('trim_offsets' , UpperCAmelCase_ ) != trim_offsets:
SCREAMING_SNAKE_CASE__ = trim_offsets
SCREAMING_SNAKE_CASE__ = True
if changes_to_apply:
SCREAMING_SNAKE_CASE__ = getattr(UpperCAmelCase_ , state.pop('type' ) )
SCREAMING_SNAKE_CASE__ = component_class(**UpperCAmelCase_ )
setattr(self.backend_tokenizer , UpperCAmelCase_ , UpperCAmelCase_ )
@property
def A_ ( self : Tuple ):
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def A_ ( self : Any , UpperCAmelCase_ : List[Any] ):
SCREAMING_SNAKE_CASE__ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else value
SCREAMING_SNAKE_CASE__ = value
def A_ ( self : List[str] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE__ = kwargs.get('is_split_into_words' , UpperCAmelCase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def A_ ( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE__ = kwargs.get('is_split_into_words' , UpperCAmelCase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.' )
return super()._encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def A_ ( self : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ):
SCREAMING_SNAKE_CASE__ = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
def A_ ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any]=None ):
SCREAMING_SNAKE_CASE__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A_ ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
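# Usage sketch (illustrative addition; upstream this class is BartTokenizerFast):
#
#     tok = BartTokenizerFast.from_pretrained('facebook/bart-base')
#     enc = tok('Hello world')
#     # single sequences are wrapped as <s> ... </s> by the method above
#     assert enc['input_ids'][0] == tok.bos_token_id
#     assert enc['input_ids'][-1] == tok.eos_token_id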
| 176 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class A_ ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
__snake_case = 'resnet'
__snake_case = ['basic', 'bottleneck']
def __init__( self: List[Any] , a: Optional[Any]=3 , a: Dict=64 , a: Tuple=[256, 512, 1024, 2048] , a: str=[3, 4, 6, 3] , a: Dict="bottleneck" , a: str="relu" , a: str=False , a: Optional[Any]=None , a: Dict=None , **a: Tuple , ):
super().__init__(**a )
if layer_type not in self.layer_types:
raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
__lowerCamelCase : Dict = num_channels
__lowerCamelCase : List[Any] = embedding_size
__lowerCamelCase : List[Any] = hidden_sizes
__lowerCamelCase : str = depths
__lowerCamelCase : Union[str, Any] = layer_type
__lowerCamelCase : List[Any] = hidden_act
__lowerCamelCase : Tuple = downsample_in_first_stage
__lowerCamelCase : Optional[Any] = ['stem'] + [F'stage{idx}' for idx in range(1 , len(a ) + 1 )]
__lowerCamelCase , __lowerCamelCase : int = get_aligned_output_features_output_indices(
out_features=a , out_indices=a , stage_names=self.stage_names )
class A_ ( lowerCamelCase__ ):
'''simple docstring'''
__snake_case = version.parse("""1.11""" )
@property
def _snake_case ( self: Tuple ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _snake_case ( self: Optional[int] ):
return 1e-3
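# Usage sketch (illustrative addition; upstream these classes are ResNetConfig
# and ResNetOnnxConfig, and the tolerance property is named atol_for_validation):
#
#     config = ResNetConfig(depths=[3, 4, 6, 3], layer_type='bottleneck', out_features=['stage4'])
#     onnx_config = ResNetOnnxConfig(config)
#     print(onnx_config.inputs)               # OrderedDict mapping 'pixel_values' to its dynamic axes
#     print(onnx_config.atol_for_validation)  # 1e-3, from the property above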
| 356 |
def UpperCamelCase__ ( arr , required_sum ):
    # bottom-up subset-sum DP: subset[i][j] is True when some subset of the
    # first i elements of arr sums exactly to j
    arr_len = len(arr )
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # a sum of zero can always be formed by taking no elements, hence True
    for i in range(arr_len + 1 ):
        subset[i][0] = True
    # a non-zero sum cannot be formed from an empty prefix, hence False
    for i in range(1 , required_sum + 1 ):
        subset[0][i] = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
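# Usage sketch (illustrative addition): the DP table answers "can some subset
# of arr sum exactly to required_sum?".
if __name__ == "__main__":
    assert UpperCamelCase__([3, 34, 4, 12, 5, 2] , 9 )  # 4 + 5 = 9
    assert not UpperCamelCase__([3, 34, 4, 12, 5, 2] , 30 )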
if __name__ == "__main__":
import doctest
doctest.testmod()
| 194 | 0 |
'''simple docstring'''
def sum_of_series( first_term , common_diff , num_of_terms ) -> float:
    # closed-form sum of an arithmetic series: S = n / 2 * (2a + (n - 1) * d)
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total
def main( ) -> None:
    print(sum_of_series(1 , 1 , 10 ) )  # 1 + 2 + ... + 10 = 55.0
if __name__ == "__main__":
import doctest
doctest.testmod()
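# Usage sketch (illustrative addition): the closed form agrees with a direct sum.
if __name__ == "__main__":
    assert sum_of_series(1 , 1 , 10 ) == sum(range(1 , 11 ) )  # 55.0
    assert sum_of_series(5 , 3 , 4 ) == 5 + 8 + 11 + 14  # 38.0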
| 162 |
def logical_left_shift( number , shift_amount ) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("""both inputs must be positive integers""" )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift( number , shift_amount ) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("""both inputs must be positive integers""" )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift( number , shift_amount ) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = """0""" + str(bin(number ) ).strip("""-""" )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            """1""" + """0""" * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
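# Usage sketch (illustrative addition): expected outputs for small inputs.
if __name__ == "__main__":
    assert logical_left_shift(17 , 2 ) == "0b1000100"      # 0b10001 with two zeros appended
    assert logical_right_shift(1983 , 4 ) == "0b1111011"   # drop the four lowest bits
    assert arithmetic_right_shift(-17 , 2 ) == "0b111011"  # the sign bit is replicated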
| 124 | 0 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase):
'''simple docstring'''
snake_case_ =AutoencoderKL
snake_case_ ="""sample"""
snake_case_ =1e-2
@property
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Dict = 4
lowerCAmelCase__ : List[str] = 3
lowerCAmelCase__ : Any = (32, 32)
lowerCAmelCase__ : str = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
return {"sample": image}
@property
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
return (3, 32, 32)
@property
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
return (3, 32, 32)
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Dict = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
lowerCAmelCase__ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
pass
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skipIf(torch_device == '''mps''' ,'''Gradient checkpointing skipped on MPS''' )
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.prepare_init_args_and_inputs_for_common()
lowerCAmelCase__ : List[Any] = self.model_class(**__lowerCamelCase )
model.to(__lowerCamelCase )
assert not model.is_gradient_checkpointing and model.training
lowerCAmelCase__ : List[Any] = model(**__lowerCamelCase ).sample
        # run the backward pass on the model; for simplicity we skip a real loss
        # and backprop on the mean difference between the output and random targets
model.zero_grad()
lowerCAmelCase__ : Dict = torch.randn_like(__lowerCamelCase )
lowerCAmelCase__ : Dict = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
lowerCAmelCase__ : str = self.model_class(**__lowerCamelCase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(__lowerCamelCase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
lowerCAmelCase__ : Any = model_a(**__lowerCamelCase ).sample
        # run the backward pass on the model; for simplicity we skip a real loss
        # and backprop on the mean difference between the output and random targets
model_a.zero_grad()
lowerCAmelCase__ : Optional[int] = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
lowerCAmelCase__ : Any = dict(model.named_parameters() )
lowerCAmelCase__ : List[Any] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data ,named_params_a[name].grad.data ,atol=5e-5 ) )
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : int = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' ,output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) ,0 )
model.to(__lowerCamelCase )
lowerCAmelCase__ : Tuple = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
lowerCAmelCase__ : Optional[int] = model.to(__lowerCamelCase )
model.eval()
if torch_device == "mps":
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
else:
lowerCAmelCase__ : Dict = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
lowerCAmelCase__ : Dict = torch.randn(
1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,)
lowerCAmelCase__ : Optional[int] = image.to(__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : Optional[int] = model(__lowerCamelCase ,sample_posterior=__lowerCamelCase ,generator=__lowerCamelCase ).sample
lowerCAmelCase__ : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
lowerCAmelCase__ : Tuple = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
] )
elif torch_device == "cpu":
lowerCAmelCase__ : str = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
lowerCAmelCase__ : Tuple = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(__lowerCamelCase ,__lowerCamelCase ,rtol=1e-2 ) )
@slow
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> int:
"""simple docstring"""
return f"""gaussian_noise_s={seed}_shape={'_'.join([str(__lowerCamelCase ) for s in shape] )}.npy"""
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ (self ,__lowerCamelCase=0 ,__lowerCamelCase=(4, 3, 5_12, 5_12) ,__lowerCamelCase=False ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = torch.floataa if fpaa else torch.floataa
lowerCAmelCase__ : Optional[Any] = torch.from_numpy(load_hf_numpy(self.get_file_format(__lowerCamelCase ,__lowerCamelCase ) ) ).to(__lowerCamelCase ).to(__lowerCamelCase )
return image
def lowerCAmelCase__ (self ,__lowerCamelCase="CompVis/stable-diffusion-v1-4" ,__lowerCamelCase=False ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Tuple = '''fp16''' if fpaa else None
lowerCAmelCase__ : Union[str, Any] = torch.floataa if fpaa else torch.floataa
lowerCAmelCase__ : Dict = AutoencoderKL.from_pretrained(
__lowerCamelCase ,subfolder='''vae''' ,torch_dtype=__lowerCamelCase ,revision=__lowerCamelCase ,)
model.to(__lowerCamelCase ).eval()
return model
def lowerCAmelCase__ (self ,__lowerCamelCase=0 ) -> Dict:
"""simple docstring"""
if torch_device == "mps":
return torch.manual_seed(__lowerCamelCase )
return torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : str = self.get_sd_vae_model()
lowerCAmelCase__ : Union[str, Any] = self.get_sd_image(__lowerCamelCase )
lowerCAmelCase__ : Tuple = self.get_generator(__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : str = model(__lowerCamelCase ,generator=__lowerCamelCase ,sample_posterior=__lowerCamelCase ).sample
assert sample.shape == image.shape
lowerCAmelCase__ : Any = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowerCAmelCase__ : Optional[Any] = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(__lowerCamelCase ,__lowerCamelCase ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> str:
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.get_sd_vae_model(fpaa=__lowerCamelCase )
lowerCAmelCase__ : int = self.get_sd_image(__lowerCamelCase ,fpaa=__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = self.get_generator(__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : Optional[int] = model(__lowerCamelCase ,generator=__lowerCamelCase ,sample_posterior=__lowerCamelCase ).sample
assert sample.shape == image.shape
lowerCAmelCase__ : Tuple = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowerCAmelCase__ : Union[str, Any] = torch.tensor(__lowerCamelCase )
assert torch_all_close(__lowerCamelCase ,__lowerCamelCase ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : int = self.get_sd_vae_model()
lowerCAmelCase__ : Union[str, Any] = self.get_sd_image(__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : Optional[int] = model(__lowerCamelCase ).sample
assert sample.shape == image.shape
lowerCAmelCase__ : int = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowerCAmelCase__ : Any = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(__lowerCamelCase ,__lowerCamelCase ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.get_sd_vae_model()
lowerCAmelCase__ : Optional[int] = self.get_sd_image(__lowerCamelCase ,shape=(3, 4, 64, 64) )
with torch.no_grad():
lowerCAmelCase__ : Tuple = model.decode(__lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
lowerCAmelCase__ : Dict = sample[-1, -2:, :2, -2:].flatten().cpu()
lowerCAmelCase__ : int = torch.tensor(__lowerCamelCase )
assert torch_all_close(__lowerCamelCase ,__lowerCamelCase ,atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Any = self.get_sd_vae_model(fpaa=__lowerCamelCase )
lowerCAmelCase__ : Dict = self.get_sd_image(__lowerCamelCase ,shape=(3, 4, 64, 64) ,fpaa=__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : Dict = model.decode(__lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
lowerCAmelCase__ : Optional[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowerCAmelCase__ : Union[str, Any] = torch.tensor(__lowerCamelCase )
assert torch_all_close(__lowerCamelCase ,__lowerCamelCase ,atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason='''xformers is not required when using PyTorch 2.0.''' )
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : str = self.get_sd_vae_model(fpaa=__lowerCamelCase )
lowerCAmelCase__ : Any = self.get_sd_image(__lowerCamelCase ,shape=(3, 4, 64, 64) ,fpaa=__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : List[str] = model.decode(__lowerCamelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model.decode(__lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
assert torch_all_close(__lowerCamelCase ,__lowerCamelCase ,atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason='''xformers is not required when using PyTorch 2.0.''' )
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Dict = self.get_sd_vae_model()
lowerCAmelCase__ : int = self.get_sd_image(__lowerCamelCase ,shape=(3, 4, 64, 64) )
with torch.no_grad():
lowerCAmelCase__ : Any = model.decode(__lowerCamelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowerCAmelCase__ : Dict = model.decode(__lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
assert torch_all_close(__lowerCamelCase ,__lowerCamelCase ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> int:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = self.get_sd_vae_model()
lowerCAmelCase__ : Tuple = self.get_sd_image(__lowerCamelCase )
lowerCAmelCase__ : List[Any] = self.get_generator(__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : Tuple = model.encode(__lowerCamelCase ).latent_dist
lowerCAmelCase__ : int = dist.sample(generator=__lowerCamelCase )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
lowerCAmelCase__ : List[Any] = sample[0, -1, -3:, -3:].flatten().cpu()
lowerCAmelCase__ : List[str] = torch.tensor(__lowerCamelCase )
lowerCAmelCase__ : int = 3e-3 if torch_device != '''mps''' else 1e-2
assert torch_all_close(__lowerCamelCase ,__lowerCamelCase ,atol=__lowerCamelCase )
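# Usage sketch (illustrative addition, mirroring the tests above): a minimal
# encode -> sample -> decode round trip with the dummy VAE checkpoint.
if __name__ == "__main__":
    _vae = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' )
    _image = torch.randn(1 , 3 , 32 , 32 )
    with torch.no_grad():
        _latents = _vae.encode(_image ).latent_dist.sample()
        _reconstruction = _vae.decode(_latents ).sample
    assert _reconstruction.shape == _image.shape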
| 94 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case : int =get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class lowerCamelCase__ ( lowerCamelCase__ , unittest.TestCase):
'''simple docstring'''
snake_case_ =XLMProphetNetTokenizer
snake_case_ =False
snake_case_ =True
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ : int = XLMProphetNetTokenizer(__lowerCamelCase ,keep_accents=__lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : str = '''[PAD]'''
lowerCAmelCase__ : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) ,__lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) ,__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'''[PAD]''' )
self.assertEqual(vocab_keys[1] ,'''[CLS]''' )
self.assertEqual(vocab_keys[-1] ,'''j''' )
self.assertEqual(len(__lowerCamelCase ) ,10_12 )
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size ,10_12 )
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Dict = XLMProphetNetTokenizer(__lowerCamelCase ,keep_accents=__lowerCamelCase )
lowerCAmelCase__ : Tuple = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__lowerCamelCase ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ) ,[value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] ,)
lowerCAmelCase__ : Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowerCamelCase ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
lowerCAmelCase__ : Optional[Any] = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] ,)
lowerCAmelCase__ : Optional[Any] = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] ,)
@cached_property
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = '''Hello World!'''
lowerCAmelCase__ : str = [3_53_89, 66_72, 49, 2]
self.assertListEqual(__lowerCamelCase ,self.big_tokenizer.encode(__lowerCamelCase ) )
@slow
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Any = {'''input_ids''': [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase ,model_name='''microsoft/xprophetnet-large-wiki100-cased''' ,revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' ,)
| 94 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class a__:
def __init__( self : Any , __snake_case : Union[str, Any] , __snake_case : Union[str, Any]=13 , __snake_case : List[Any]=7 , __snake_case : Tuple=True , __snake_case : Tuple=True , __snake_case : List[Any]=True , __snake_case : Optional[int]=True , __snake_case : Tuple=99 , __snake_case : Dict=32 , __snake_case : str=2 , __snake_case : List[Any]=4 , __snake_case : int=37 , __snake_case : Union[str, Any]="gelu" , __snake_case : Optional[Any]=0.1 , __snake_case : Any=0.1 , __snake_case : Dict=5_12 , __snake_case : Dict=16 , __snake_case : List[str]=2 , __snake_case : List[str]=0.02 , __snake_case : Optional[Any]=3 , __snake_case : Optional[Any]=4 , __snake_case : Dict=None , ):
a : List[str] = parent
a : Optional[int] = 13
a : List[str] = 7
a : int = True
a : List[str] = True
a : List[str] = True
a : List[Any] = True
a : int = 99
a : List[str] = 3_84
a : Optional[Any] = 2
a : Optional[Any] = 4
a : str = 37
a : List[Any] = 'gelu'
a : Optional[int] = 0.1
a : Tuple = 0.1
a : Dict = 5_12
a : Tuple = 16
a : Optional[int] = 2
a : str = 0.02
a : Any = 3
a : Tuple = 4
a : Optional[int] = 1_28
a : int = 2
a : Optional[Any] = 9
a : Optional[int] = 1
a : Tuple = None
def lowercase_ ( self : List[Any] ):
a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a : int = None
if self.use_input_mask:
a : Dict = random_attention_mask([self.batch_size, self.seq_length] )
a : Tuple = None
if self.use_token_type_ids:
a : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a : str = None
a : str = None
a : Tuple = None
if self.use_labels:
a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a : str = ids_tensor([self.batch_size] , self.num_choices )
a : str = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__snake_case , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self : List[Any] , __snake_case : int , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Tuple , __snake_case : str ):
a : int = TFConvBertModel(config=__snake_case )
a : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
a : Dict = [input_ids, input_mask]
a : Union[str, Any] = model(__snake_case )
a : str = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self : Dict , __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : int ):
a : Tuple = TFConvBertForMaskedLM(config=__snake_case )
a : Optional[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
a : List[Any] = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self : Optional[int] , __snake_case : List[Any] , __snake_case : str , __snake_case : List[Any] , __snake_case : str , __snake_case : str , __snake_case : List[Any] , __snake_case : List[Any] ):
a : Optional[int] = self.num_labels
a : int = TFConvBertForSequenceClassification(config=__snake_case )
a : List[str] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
a : List[Any] = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self : Optional[int] , __snake_case : Optional[int] , __snake_case : Any , __snake_case : List[str] , __snake_case : str , __snake_case : str , __snake_case : Tuple , __snake_case : Dict ):
a : Any = self.num_choices
a : str = TFConvBertForMultipleChoice(config=__snake_case )
a : int = tf.tile(tf.expand_dims(__snake_case , 1 ) , (1, self.num_choices, 1) )
a : List[Any] = tf.tile(tf.expand_dims(__snake_case , 1 ) , (1, self.num_choices, 1) )
a : str = tf.tile(tf.expand_dims(__snake_case , 1 ) , (1, self.num_choices, 1) )
a : Any = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
a : Union[str, Any] = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase_ ( self : str , __snake_case : List[str] , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : str ):
a : Any = self.num_labels
a : List[Any] = TFConvBertForTokenClassification(config=__snake_case )
a : Optional[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
a : List[str] = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self : int , __snake_case : str , __snake_case : int , __snake_case : int , __snake_case : Optional[int] , __snake_case : str , __snake_case : Any , __snake_case : List[Any] ):
a : List[Any] = TFConvBertForQuestionAnswering(config=__snake_case )
a : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
a : Any = model(__snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self : Optional[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
a : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class a__( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase__ = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase__ = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def lowercase_ ( self : Dict ):
a : Tuple = TFConvBertModelTester(self )
a : Tuple = ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def lowercase_ ( self : int ):
self.config_tester.run_common_tests()
def lowercase_ ( self : List[Any] ):
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def lowercase_ ( self : Tuple ):
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__snake_case )
def lowercase_ ( self : Dict ):
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__snake_case )
def lowercase_ ( self : Dict ):
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__snake_case )
def lowercase_ ( self : Optional[Any] ):
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__snake_case )
def lowercase_ ( self : Optional[Any] ):
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__snake_case )
@slow
def lowercase_ ( self : Any ):
a , a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
a : List[str] = True
a : Optional[Any] = True
if hasattr(__snake_case , 'use_cache' ):
a : List[str] = True
a : Tuple = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
a : List[Any] = getattr(self.model_tester , 'key_length' , __snake_case )
for model_class in self.all_model_classes:
a : str = self._prepare_for_class(__snake_case , __snake_case )
a : List[Any] = model_class(__snake_case )
a : Optional[Any] = len(model(__snake_case ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__snake_case , saved_model=__snake_case )
a : Optional[int] = os.path.join(__snake_case , 'saved_model' , '1' )
a : str = tf.keras.models.load_model(__snake_case )
a : Any = model(__snake_case )
if self.is_encoder_decoder:
a : Union[str, Any] = outputs['encoder_hidden_states']
a : str = outputs['encoder_attentions']
else:
a : str = outputs['hidden_states']
a : Optional[Any] = outputs['attentions']
self.assertEqual(len(__snake_case ) , __snake_case )
a : List[str] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__snake_case ) , __snake_case )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def lowercase_ ( self : List[Any] ):
a : Optional[int] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(__snake_case )
def lowercase_ ( self : Optional[Any] ):
a , a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
a : Any = True
a : Optional[int] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
a : Optional[Any] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
a : Any = getattr(self.model_tester , 'key_length' , __snake_case )
a : Tuple = getattr(self.model_tester , 'key_length' , __snake_case )
def check_decoder_attentions_output(__snake_case : Dict ):
a : Optional[int] = len(__snake_case )
self.assertEqual(out_len % 2 , 0 )
a : List[Any] = outputs.decoder_attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__snake_case : Tuple ):
a : Union[str, Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
a : Union[str, Any] = True
a : List[str] = False
a : Any = model_class(__snake_case )
a : Optional[int] = model(self._prepare_for_class(__snake_case , __snake_case ) )
a : List[Any] = len(__snake_case )
self.assertEqual(config.output_hidden_states , __snake_case )
check_encoder_attentions_output(__snake_case )
if self.is_encoder_decoder:
a : int = model_class(__snake_case )
a : Optional[Any] = model(self._prepare_for_class(__snake_case , __snake_case ) )
self.assertEqual(config.output_hidden_states , __snake_case )
check_decoder_attentions_output(__snake_case )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
a : List[Any] = True
a : Optional[int] = model_class(__snake_case )
a : Any = model(self._prepare_for_class(__snake_case , __snake_case ) )
self.assertEqual(config.output_hidden_states , __snake_case )
check_encoder_attentions_output(__snake_case )
# Check attention is always last and order is fine
a : List[str] = True
a : List[str] = True
a : Tuple = model_class(__snake_case )
a : Tuple = model(self._prepare_for_class(__snake_case , __snake_case ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__snake_case ) )
self.assertEqual(model.config.output_hidden_states , __snake_case )
check_encoder_attentions_output(__snake_case )
@require_tf
class a__( unittest.TestCase ):
@slow
def lowercase_ ( self : Tuple ):
a : Union[str, Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
a : Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
a : str = model(__snake_case )[0]
a : Tuple = [1, 6, 7_68]
self.assertEqual(output.shape , __snake_case )
a : Any = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
        tf.debugging.assert_near(output[:, :3, :3] , __snake_case , atol=1e-4 )
| 297 |
'''simple docstring'''
def hubble_parameter( hubble_constant , radiation_density , matter_density , dark_energy , redshift , ):
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError('All input parameters must be positive' )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError('Relative densities cannot be greater than one' )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_a = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_a ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 297 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __snake_case ( unittest.TestCase ):
a__ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a__ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> str:
'''simple docstring'''
a__: Optional[int] = TextaTextGenerationPipeline(model=lowercase , tokenizer=lowercase)
return generator, ["Something to write", "Something else"]
def lowerCamelCase_ ( self , lowercase , lowercase) -> int:
'''simple docstring'''
a__: Union[str, Any] = generator('Something there')
self.assertEqual(lowercase , [{'generated_text': ANY(lowercase)}])
        # These are encoder-decoder models, so they don't just append to the incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there'))
a__: Optional[Any] = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=lowercase)
self.assertEqual(
lowercase , [
[{'generated_text': ANY(lowercase)}, {'generated_text': ANY(lowercase)}],
[{'generated_text': ANY(lowercase)}, {'generated_text': ANY(lowercase)}],
] , )
a__: List[Any] = generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=lowercase)
self.assertEqual(
lowercase , [
[{'generated_text': ANY(lowercase)}, {'generated_text': ANY(lowercase)}],
[{'generated_text': ANY(lowercase)}, {'generated_text': ANY(lowercase)}],
] , )
with self.assertRaises(lowercase):
generator(4)
@require_torch
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Any = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt')
# do_sample=False necessary for reproducibility
a__: int = generator('Something there' , do_sample=lowercase)
self.assertEqual(lowercase , [{'generated_text': ''}])
a__: int = 3
a__: str = generator(
'Something there' , num_return_sequences=lowercase , num_beams=lowercase , )
a__: int = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(lowercase , lowercase)
a__: Optional[int] = generator('This is a test' , do_sample=lowercase , num_return_sequences=2 , return_tensors=lowercase)
self.assertEqual(
lowercase , [
{'generated_token_ids': ANY(torch.Tensor)},
{'generated_token_ids': ANY(torch.Tensor)},
] , )
a__: int = generator.model.config.eos_token_id
a__: Optional[Any] = '<pad>'
a__: Optional[int] = generator(
['This is a test', 'This is a second test'] , do_sample=lowercase , num_return_sequences=2 , batch_size=2 , return_tensors=lowercase , )
self.assertEqual(
lowercase , [
[
{'generated_token_ids': ANY(torch.Tensor)},
{'generated_token_ids': ANY(torch.Tensor)},
],
[
{'generated_token_ids': ANY(torch.Tensor)},
{'generated_token_ids': ANY(torch.Tensor)},
],
] , )
@require_tf
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Union[str, Any] = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf')
# do_sample=False necessary for reproducibility
a__: str = generator('Something there' , do_sample=lowercase)
self.assertEqual(lowercase , [{'generated_text': ''}])
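# Usage sketch (illustrative addition): the user-facing API exercised by the
# tests above.
if __name__ == "__main__":
    _generator = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' )
    print(_generator('Something there' , do_sample=False ) )  # [{'generated_text': ...}]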
| 203 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowercase__ = None
lowercase__ = logging.get_logger(__name__)
lowercase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowercase__ = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
lowercase__ = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
lowercase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class __snake_case ( __lowerCAmelCase ):
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = ["""input_ids""", """attention_mask"""]
a__ = MBartTokenizer
a__ = []
a__ = []
def __init__( self , lowercase=None , lowercase=None , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=None , lowercase=None , lowercase=None , **lowercase , ) -> Tuple:
'''simple docstring'''
a__: List[str] = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else mask_token
super().__init__(
vocab_file=lowercase , tokenizer_file=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , src_lang=lowercase , tgt_lang=lowercase , additional_special_tokens=lowercase , **lowercase , )
a__: Tuple = vocab_file
a__: Union[str, Any] = False if not self.vocab_file else True
a__: Union[str, Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens})
a__: int = {
lang_code: self.convert_tokens_to_ids(lowercase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
a__: List[Any] = src_lang if src_lang is not None else 'en_XX'
a__: Tuple = self.convert_tokens_to_ids(self._src_lang)
a__: str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
a__: Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
a__: Any = [self.sep_token_id]
a__: List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , **lowercase) -> Union[str, Any]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
a__: Union[str, Any] = src_lang
a__: Any = self(lowercase , add_special_tokens=lowercase , return_tensors=lowercase , **lowercase)
a__: str = self.convert_tokens_to_ids(lowercase)
a__: Any = tgt_lang_id
return inputs
def lowerCamelCase_ ( self , lowercase , lowercase = "en_XX" , lowercase = None , lowercase = "ro_RO" , **lowercase , ) -> BatchEncoding:
'''simple docstring'''
a__: Any = src_lang
a__: List[Any] = tgt_lang
return super().prepare_seqaseq_batch(lowercase , lowercase , **lowercase)
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang)
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
a__: int = self.convert_tokens_to_ids(lowercase)
a__: List[Any] = []
a__: List[str] = [self.eos_token_id, self.cur_lang_code]
a__: Dict = self.convert_ids_to_tokens(self.prefix_tokens)
a__: Any = self.convert_ids_to_tokens(self.suffix_tokens)
a__: int = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
a__: str = self.convert_tokens_to_ids(lowercase)
a__: List[Any] = []
a__: Dict = [self.eos_token_id, self.cur_lang_code]
a__: Any = self.convert_ids_to_tokens(self.prefix_tokens)
a__: Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens)
a__: str = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(lowercase):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.')
return
a__: Any = os.path.join(
lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase):
copyfile(self.vocab_file , lowercase)
return (out_vocab_file,)
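# Usage sketch (illustrative addition; upstream this class is MBartTokenizerFast).
# src_lang/tgt_lang drive the language-code special tokens set above:
#
#     tok = MBartTokenizerFast.from_pretrained('facebook/mbart-large-en-ro', src_lang='en_XX', tgt_lang='ro_RO')
#     inputs = tok('UN Chief says there is no military solution in Syria', return_tensors='pt')
#     # source input_ids end with </s> followed by the en_XX language code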
| 203 | 1 |
from __future__ import annotations
def encode( plain : str ) -> list[int]:
    '''simple docstring'''
    return [ord(elem ) - 96 for elem in plain]
def decode( encoded : list[int] ) -> str:
    '''simple docstring'''
    return "".join(chr(elem + 96 ) for elem in encoded )
def main( ) -> None:
    '''simple docstring'''
    encoded = encode(input('-> ' ).strip().lower() )
    print('Encoded: ' , encoded )
    print('Decoded:' , decode(encoded ) )
if __name__ == "__main__":
    main()
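# Round-trip sketch (illustrative addition): encode and decode are inverses
# for lowercase a-z input.
if __name__ == "__main__":
    assert encode('hello' ) == [8, 5, 12, 12, 15]
    assert decode(encode('hello' ) ) == 'hello'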
| 140 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all MVP models at https://huggingface.co/models?filter=mvp
_UpperCAmelCase = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
_UpperCAmelCase = {
"""RUCAIBox/mvp""": 1024,
}
class UpperCAmelCase ( PreTrainedTokenizerFast ):
'''simple docstring'''
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = ['''input_ids''', '''attention_mask''']
lowerCamelCase_ = MvpTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
    def mask_token( self ):
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )
@mask_token.setter
    def mask_token( self , value ):
        """simple docstring"""
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ):
        """simple docstring"""
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                'to use it with pretokenized inputs.' )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ):
        """simple docstring"""
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                'to use it with pretokenized inputs.' )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
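# Sketch of the layouts produced by the two methods above, with assumed ids
# <s> = 0 and </s> = 2 (the content token ids are illustrative only):
_bos, _eos = 0, 2
_ids_a, _ids_b = [713, 16], [162]
_single = [_bos] + _ids_a + [_eos]          # <s> A </s>
_pair = _single + [_eos] + _ids_b + [_eos]  # <s> A </s></s> B </s>
assert len(_pair) * [0] == [0] * (len(_ids_a) + len(_ids_b) + 4)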
| 140 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 256_047
RO_CODE = 256_145
@require_sentencepiece
@require_tokenizers
class lowercase( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowercase__ = NllbTokenizer
lowercase__ = NllbTokenizerFast
lowercase__ = True
lowercase__ = True
lowercase__ = {}
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_snake_case : Union[str, Any] = NllbTokenizer(__lowerCamelCase, keep_accents=__lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : str = NllbTokenizer(__lowerCamelCase, keep_accents=__lowerCamelCase )
_snake_case : Tuple = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__lowerCamelCase, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
_snake_case : Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__lowerCamelCase, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
], )
_snake_case : str = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
], )
_snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
], )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[Any] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase, **__lowerCamelCase )
_snake_case : Optional[int] = self.tokenizer_class.from_pretrained(__lowerCamelCase, **__lowerCamelCase )
_snake_case : Any = tempfile.mkdtemp()
_snake_case : Union[str, Any] = tokenizer_r.save_pretrained(__lowerCamelCase )
_snake_case : Any = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
_snake_case : List[Any] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__lowerCamelCase, __lowerCamelCase )
# Checks everything loads correctly in the same way
_snake_case : Dict = tokenizer_r.from_pretrained(__lowerCamelCase )
_snake_case : List[Any] = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase, __lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=True
_snake_case : Union[str, Any] = tempfile.mkdtemp()
_snake_case : Optional[int] = tokenizer_r.save_pretrained(__lowerCamelCase, legacy_format=__lowerCamelCase )
_snake_case : Dict = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(__lowerCamelCase, __lowerCamelCase )
# Checks everything loads correctly in the same way
_snake_case : List[str] = tokenizer_r.from_pretrained(__lowerCamelCase )
_snake_case : Dict = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase, __lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=False
_snake_case : Tuple = tempfile.mkdtemp()
_snake_case : Union[str, Any] = tokenizer_r.save_pretrained(__lowerCamelCase, legacy_format=__lowerCamelCase )
_snake_case : List[str] = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_snake_case : Optional[int] = tokenizer_r.from_pretrained(__lowerCamelCase )
_snake_case : int = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase, __lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
@require_torch
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
if not self.test_seqaseq:
return
_snake_case : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Longer text that will definitely require truncation.
_snake_case : Optional[Any] = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"""
""" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"""
""" will only worsen the violence and misery for millions of people.""",
]
_snake_case : Union[str, Any] = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"""
""" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"""
""" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
try:
_snake_case : Any = tokenizer.prepare_seqaseq_batch(
src_texts=__lowerCamelCase, tgt_texts=__lowerCamelCase, max_length=3, max_target_length=10, return_tensors="""pt""", src_lang="""eng_Latn""", tgt_lang="""ron_Latn""", )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.labels.shape[1], 10 )
# max_target_length will default to max_length if not specified
_snake_case : List[str] = tokenizer.prepare_seqaseq_batch(
__lowerCamelCase, tgt_texts=__lowerCamelCase, max_length=3, return_tensors="""pt""" )
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.labels.shape[1], 3 )
_snake_case : Tuple = tokenizer.prepare_seqaseq_batch(
src_texts=__lowerCamelCase, max_length=3, max_target_length=10, return_tensors="""pt""" )
self.assertEqual(batch_encoder_only.input_ids.shape[1], 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3 )
self.assertNotIn("""decoder_input_ids""", __lowerCamelCase )
@unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_snake_case : List[str] = [AddedToken("""<special>""", lstrip=__lowerCamelCase )]
_snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase, additional_special_tokens=__lowerCamelCase, **__lowerCamelCase )
_snake_case : Optional[int] = tokenizer_r.encode("""Hey this is a <special> token""" )
_snake_case : List[Any] = tokenizer_r.encode("""<special>""", add_special_tokens=__lowerCamelCase )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
_snake_case : str = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase, additional_special_tokens=__lowerCamelCase, **__lowerCamelCase, )
_snake_case : Dict = self.tokenizer_class.from_pretrained(
__lowerCamelCase, additional_special_tokens=__lowerCamelCase, **__lowerCamelCase )
_snake_case : Union[str, Any] = tokenizer_p.encode("""Hey this is a <special> token""" )
_snake_case : List[str] = tokenizer_cr.encode("""Hey this is a <special> token""" )
self.assertEqual(__lowerCamelCase, __lowerCamelCase )
self.assertEqual(__lowerCamelCase, __lowerCamelCase )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest( unittest.TestCase ):
'''simple docstring'''
    checkpoint_name = """facebook/nllb-200-distilled-600M"""
    src_text = [
        """ UN Chief Says There Is No Military Solution in Syria""",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
        """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
        """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
        """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
    ]
    expected_src_tokens = [
        256_047,
        16_297,
        134_408,
        8_165,
        248_066,
        14_734,
        950,
        1_135,
        105_721,
        3_573,
        83,
        27_352,
        108,
        49_486,
        2,
    ]
@classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="""eng_Latn""", tgt_lang="""ron_Latn""" )
        cls.pad_token_id = 1
        return cls
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""], 256_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""], 256_002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""], 256_057 )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens, __lowerCamelCase )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
self.assertIn(__lowerCamelCase, self.tokenizer.all_special_ids )
# fmt: off
_snake_case : Optional[int] = [RO_CODE, 4_254, 98_068, 112_923, 39_072, 3_909, 713, 102_767, 26, 17_314, 35_642, 14_683, 33_118, 2_022, 66_987, 2, 256_047]
# fmt: on
_snake_case : List[Any] = self.tokenizer.decode(__lowerCamelCase, skip_special_tokens=__lowerCamelCase )
_snake_case : int = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase, __lowerCamelCase )
self.assertNotIn(self.tokenizer.eos_token, __lowerCamelCase )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Union[str, Any] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0], __lowerCamelCase )
_snake_case : int = 10
_snake_case : List[Any] = self.tokenizer(__lowerCamelCase, max_length=__lowerCamelCase, truncation=__lowerCamelCase ).input_ids[0]
self.assertEqual(ids[-1], 2 )
self.assertEqual(ids[0], __lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ), __lowerCamelCase )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ), [256_203, 3] )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : List[str] = tempfile.mkdtemp()
_snake_case : Tuple = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__lowerCamelCase )
_snake_case : Dict = NllbTokenizer.from_pretrained(__lowerCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids, __lowerCamelCase )
@require_torch
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Optional[int] = self.tokenizer(
self.src_text, text_target=self.tgt_text, padding=__lowerCamelCase, truncation=__lowerCamelCase, max_length=len(self.expected_src_tokens ), return_tensors="""pt""", )
_snake_case : Tuple = shift_tokens_right(
batch["""labels"""], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["""ron_Latn"""] )
self.assertIsInstance(__lowerCamelCase, __lowerCamelCase )
self.assertEqual((2, 15), batch.input_ids.shape )
self.assertEqual((2, 15), batch.attention_mask.shape )
_snake_case : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, __lowerCamelCase )
self.assertEqual(__lowerCamelCase, batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer(self.src_text, padding=__lowerCamelCase, truncation=__lowerCamelCase, max_length=3, return_tensors="""pt""" )
_snake_case : int = self.tokenizer(
text_target=self.tgt_text, padding=__lowerCamelCase, truncation=__lowerCamelCase, max_length=10, return_tensors="""pt""" )
_snake_case : str = targets["""input_ids"""]
_snake_case : Dict = shift_tokens_right(
__lowerCamelCase, self.tokenizer.pad_token_id, decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang], )
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.decoder_input_ids.shape[1], 10 )
@require_torch
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.tokenizer._build_translation_inputs(
"""A test""", return_tensors="""pt""", src_lang="""eng_Latn""", tgt_lang="""fra_Latn""" )
self.assertEqual(
nested_simplify(__lowerCamelCase ), {
                # eng_Latn, A, test, EOS
"""input_ids""": [[256_047, 70, 7_356, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
                # fra_Latn
"""forced_bos_token_id""": 256_057,
}, )
@require_torch
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[Any] = True
_snake_case : Tuple = self.tokenizer(
"""UN Chief says there is no military solution in Syria""", src_lang="""eng_Latn""", tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids, [16_297, 134_408, 25_653, 6_370, 248, 254, 103_929, 94_995, 108, 49_486, 2, 256_047] )
_snake_case : Optional[int] = False
_snake_case : Dict = self.tokenizer(
"""UN Chief says there is no military solution in Syria""", src_lang="""eng_Latn""", tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids, [256_047, 16_297, 134_408, 25_653, 6_370, 248, 254, 103_929, 94_995, 108, 49_486, 2] )
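# Schematic of the two layouts asserted above (</s> = 2, eng_Latn = 256_047):
_tokens = [16_297, 134_408, 25_653, 6_370, 248, 254, 103_929, 94_995, 108, 49_486]
_legacy_layout = _tokens + [2, 256_047]      # legacy behaviour: language code as a suffix
_default_layout = [256_047] + _tokens + [2]  # default behaviour: language code as a prefix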
| 368 |
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
A_ = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(flax_model , pytorch_checkpoint_path , is_sharded , allow_missing_keys=False ):
    """Load PyTorch checkpoints into a Flax state dict."""
    try:
        import torch # noqa: F401
    except ImportError:
        logger.error(
            """Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"""
            """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
            """ instructions.""" )
        raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path )
        logger.info(F"Loading PyTorch weights from {pt_path}" )
        pt_state_dict = torch.load(pt_path , map_location="""cpu""" )
        logger.info(F"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters." )
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path , flax_model )
    return flax_state_dict
def rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix , ):
    """Rename PyTorch weight names to the corresponding Flax names, reshaping tensors where needed."""
    def is_key_or_prefix_key_in_dict(key ) -> bool:
        """Checks whether `key` or `(model_prefix,) + key` is in random_flax_state_dict."""
        return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""scale""",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""mean""",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""var""",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""embedding""",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""kernel""",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""kernel""",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""weight""",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""bias""",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + """_g"""
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + """_v"""
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
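# Quick shape check of the conv rule in rename_key_and_reshape_tensor above:
# PyTorch conv kernels are (out, in, h, w) while Flax expects (h, w, in, out),
# hence transpose(2, 3, 1, 0). (np is already imported at the top of this module.)
_pt_kernel = np.zeros((8, 3, 5, 5) )
assert _pt_kernel.transpose(2 , 3 , 1 , 0 ).shape == (5, 5, 3, 8)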
def convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model ):
"""simple docstring"""
_snake_case : Tuple = {k: v.numpy() for k, v in pt_state_dict.items()}
_snake_case : int = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
_snake_case : Dict = flax_model.params["""params"""]
else:
_snake_case : List[Any] = flax_model.params
_snake_case : Tuple = flatten_dict(snake_case__ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
_snake_case : Union[str, Any] = flatten_dict(flax_model.params["""batch_stats"""] )
random_flax_state_dict.update(snake_case__ )
_snake_case : Tuple = {}
_snake_case : Dict = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
_snake_case : Optional[int] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_snake_case : int = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
_snake_case : Optional[Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
_snake_case : Union[str, Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
_snake_case , _snake_case : int = rename_key_and_reshape_tensor(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# add model prefix if necessary
_snake_case : Dict = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
_snake_case : int = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
_snake_case : Union[str, Any] = jnp.asarray(snake_case__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(snake_case__ , snake_case__ )
continue
# also add unexpected weight so that warning is thrown
_snake_case : List[Any] = jnp.asarray(snake_case__ )
else:
# also add unexpected weight so that warning is thrown
_snake_case : Optional[Any] = jnp.asarray(snake_case__ )
return unflatten_dict(snake_case__ )
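# The flatten/unflatten round trip used above, in isolation: flatten_dict turns
# nested param dicts into tuple-keyed flat dicts, and unflatten_dict reverses it.
_nested = {"encoder": {"layer_0": {"kernel": 1}}}
assert flatten_dict(_nested ) == {("encoder", "layer_0", "kernel"): 1}
assert unflatten_dict(flatten_dict(_nested ) ) == _nested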
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames , flax_model ):
"""simple docstring"""
import torch
# Load the index
_snake_case : str = {}
for shard_file in shard_filenames:
# load using msgpack utils
_snake_case : Union[str, Any] = torch.load(snake_case__ )
_snake_case : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
_snake_case : List[str] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
_snake_case : str = flax_model.params["""params"""]
_snake_case : List[Any] = flatten_dict(snake_case__ )
random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
else:
_snake_case : List[Any] = flax_model.params
_snake_case : Tuple = flatten_dict(snake_case__ )
_snake_case : Tuple = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
_snake_case : Optional[Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_snake_case : List[str] = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
_snake_case : str = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
_snake_case : Optional[Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
_snake_case , _snake_case : Optional[Any] = rename_key_and_reshape_tensor(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# add model prefix if necessary
_snake_case : List[str] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
_snake_case : Any = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
_snake_case : Optional[int] = jnp.asarray(snake_case__ )
continue
if "var" in flax_key[-1]:
_snake_case : Any = jnp.asarray(snake_case__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(snake_case__ , snake_case__ )
continue
# also add unexpected weight so that warning is thrown
_snake_case : List[str] = jnp.asarray(snake_case__ )
else:
# also add unexpected weight so that warning is thrown
_snake_case : Optional[Any] = jnp.asarray(snake_case__ )
return unflatten_dict(snake_case__ )
def load_flax_checkpoint_in_pytorch_model(model , flax_checkpoint_path ):
"""simple docstring"""
_snake_case : Optional[Any] = os.path.abspath(snake_case__ )
logger.info(F"Loading Flax weights from {flax_checkpoint_path}" )
# import correct flax class
_snake_case : Union[str, Any] = getattr(snake_case__ , """Flax""" + model.__class__.__name__ )
# load flax weight dict
with open(snake_case__ , """rb""" ) as state_f:
try:
_snake_case : Dict = from_bytes(snake_case__ , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F"Unable to convert {flax_checkpoint_path} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ )
def load_flax_weights_in_pytorch_model(pt_model , flax_state ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
    # check if we have bf16 weights
    is_type_bfaa = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bfaa ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
            """before loading those in PyTorch model.""" )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
    flax_state_dict = flatten_dict(flax_state )
    pt_model_dict = pt_model.state_dict()
    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
    )
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple
        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple ) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("""running_mean""",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("""running_var""",)
        if "batch_stats" in flax_state:
            flax_key = """.""".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
        else:
            flax_key = """.""".join(flax_key_tuple )
        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(""".""" )
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + """_g"""
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + """_v"""
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = """.""".join(key_components )
                special_pt_names[key_to_check] = key
        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
    # re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
else:
logger.warning(F"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n" )
    if len(missing_keys ) > 0:
logger.warning(
F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
""" use it for predictions and inference.""" )
else:
logger.warning(
F"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
"""If your task is similar to the task the model of the checkpoint was trained on, """
F"you can already use {pt_model.__class__.__name__} for predictions without further training." )
return pt_model
| 132 | 0 |
'''simple docstring'''
deps = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
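# Hypothetical helper (the name `deps_list` and its use in setup() are assumptions,
# not part of the table above) showing how such a pin table is typically consumed:
def deps_list(*pkgs: str) -> list:
    return [deps[pkg] for pkg in pkgs]
install_requires = deps_list("torch", "numpy", "Pillow")  # ['torch>=1.4', 'numpy', 'Pillow']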
| 2 |
"""simple docstring"""
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__( self , task_performed , total) -> None:
        '''simple docstring'''
        self.total_tasks = total # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list) # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1
    def count_ways_until( self , mask , task_no) -> int:
        '''simple docstring'''
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't include this task in the arrangement
        total_ways_util = self.count_ways_until(mask , task_no + 1)
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1)
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]
    def count_no_of_ways( self , task_performed) -> int:
        '''simple docstring'''
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0 , 1)
if __name__ == "__main__":
    total_tasks = 5 # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
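    # Hand check of the demo above: persons 0, 1 and 2 must take pairwise-distinct
    # tasks from {1, 3, 4}, {1, 2, 5} and {3, 4}; enumeration gives 10 assignments.
    assert AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(task_performed) == 10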
| 194 | 0 |
def get_demo_graph(index: int) -> dict[int, list[int]]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n
    def dfs(at: int, parent: int, bridges: list[tuple[int, int]], id_: int):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at] , low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at) )
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at] , low[to])
    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i , -1 , bridges , id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
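    # Example on the first demo graph above: removing any of these three edges
    # disconnects the graph, and they are exactly the bridges found.
    assert set(compute_bridges(get_demo_graph(0))) == {(2, 3), (3, 4), (2, 5)}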
| 109 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : Optional[int] = logging.get_logger(__name__)
snake_case : Union[str, Any] = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class snake_case_ (PretrainedConfig ):
UpperCAmelCase__ : Dict = '''visual_bert'''
def __init__( self :Optional[int] ,__snake_case :Any=3_05_22 ,__snake_case :str=7_68 ,__snake_case :Any=5_12 ,__snake_case :Any=12 ,__snake_case :int=12 ,__snake_case :str=30_72 ,__snake_case :int="gelu" ,__snake_case :Optional[int]=0.1 ,__snake_case :str=0.1 ,__snake_case :Union[str, Any]=5_12 ,__snake_case :Tuple=2 ,__snake_case :Union[str, Any]=0.02 ,__snake_case :Optional[Any]=1E-12 ,__snake_case :Optional[Any]=False ,__snake_case :int=True ,__snake_case :Any=1 ,__snake_case :Optional[int]=0 ,__snake_case :Tuple=2 ,**__snake_case :Any ,) -> Union[str, Any]:
super().__init__(pad_token_id=__snake_case ,bos_token_id=__snake_case ,eos_token_id=__snake_case ,**__snake_case )
a__ = vocab_size
a__ = max_position_embeddings
a__ = hidden_size
a__ = visual_embedding_dim
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = initializer_range
a__ = type_vocab_size
a__ = layer_norm_eps
a__ = bypass_transformer
a__ = special_visual_initialize
| 109 | 1 |
def heaps(arr: list) -> list:
    """Return all permutations of arr, generated with Heap's iterative algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []
    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr) )
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr) )
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1
    generate(len(arr) , arr)
    return res
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
print(heaps(arr))
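    # Sanity check (runs after the interactive demo): Heap's algorithm emits the
    # 3! = 6 orderings of [1, 2, 3] in exactly this sequence.
    assert heaps([1, 2, 3]) == [
        (1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1),
    ]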
| 94 |
def solution(n: int = 1000) -> int:
    """Return the index of the first term in the Fibonacci sequence to contain n digits."""
    fa, fb = 1, 1
    index = 2
    while True:
        i = 0
        f = fa + fb
        fa, fb = fb, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
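    # Sanity check: 144 is the 12th Fibonacci term and the first with three digits.
    assert solution(3) == 12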
| 94 | 1 |
'''simple docstring'''
from math import ceil
def assert_device_map(device_map , num_blocks) -> None:
    """Validate that device_map assigns each attention block to exactly one device."""
    blocks = list(range(0 , num_blocks) )
    device_map_blocks = [item for sublist in list(device_map.values() ) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i )
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks ) != 0:
        raise ValueError(
            """Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
            """ These attention blocks were specified more than once: """ + str(duplicate_blocks ) )
    if len(missing_blocks ) != 0:
        raise ValueError(
            """There are attention blocks for this model that are not specified in the device_map. Add these attention """
            """blocks to a device on the device_map: """ + str(missing_blocks ) )
    if len(extra_blocks ) != 0:
        raise ValueError(
            """The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
            + str(extra_blocks ) )
def get_device_map(n_layers , devices) -> dict:
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers ) )
    n_blocks = int(ceil(n_layers / len(devices ) ) )
    layers_list = [layers[i : i + n_blocks] for i in range(0 , n_layers , n_blocks )]
    return dict(zip(devices , layers_list ) )
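# Example: 12 layers over two devices -> six consecutive blocks per device.
assert get_device_map(12, [0, 1]) == {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}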
| 361 |
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a , b) -> bool:
    """Compare two TensorProtos for equality while ignoring their names."""
    name_a = a.name
    name_b = b.name
    a.name = """"""
    b.name = """"""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto , name , new_name) -> None:
    """Rewire every input of node_proto named `name` to `new_name`, recursing into If/Loop subgraphs."""
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with(graph_proto , name , new_name) -> None:
    """Apply _node_replace_input_with to every node in the graph."""
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model(model , model_without_ext , ind_to_replace) -> None:
    """Drop duplicated initializers and point their consumers at the kept copy."""
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers(onnx_file_path) -> str:
    """Remove duplicated initializers from an ONNX model and save the smaller copy."""
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                # TensorProto.DataType codes: 1=FLOAT (4 bytes), 6=INT32 (4 bytes),
                # 7=INT64 and 11=DOUBLE (8 bytes each)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("""unexpected data type: """ , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    optimized_model_file_name = """optimized_""" + model_file_name
    new_model = os.path.join(model_file_folder , optimized_model_file_name )
    onnx.save(model , new_model )
    return new_model
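if __name__ == "__main__":
    # Hypothetical invocation; the input path is an assumption, not from the original file.
    print(remove_dup_initializers("exported/model.onnx"))  # -> exported/optimized_model.onnx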
| 13 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( ProcessorMixin ):
__UpperCAmelCase : List[str] = ['''image_processor''', '''tokenizer''']
__UpperCAmelCase : Any = '''LayoutLMv3ImageProcessor'''
__UpperCAmelCase : List[str] = ('''LayoutLMv3Tokenizer''', '''LayoutLMv3TokenizerFast''')
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , UpperCamelCase__ , )
snake_case : Optional[int] = kwargs.pop("feature_extractor" )
snake_case : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = True , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = 0 , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = True , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> BatchEncoding:
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
# first, apply the image processor
snake_case : Optional[int] = self.image_processor(images=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
snake_case : str = [text] # add batch dimension (as the image processor always adds a batch dimension)
snake_case : Optional[Any] = features["words"]
snake_case : Dict = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
# add pixel values
snake_case : Dict = features.pop("pixel_values" )
if return_overflowing_tokens is True:
snake_case : Optional[Any] = self.get_overflowing_images(UpperCamelCase__ , encoded_inputs["overflow_to_sample_mapping"] )
snake_case : Any = images
return encoded_inputs
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
snake_case : List[str] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
F' {len(UpperCamelCase__ )} and {len(UpperCamelCase__ )}' )
return images_with_overflow
def lowerCamelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def lowerCamelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCamelCase__ , )
return self.image_processor_class
@property
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCamelCase__ , )
return self.image_processor
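# A minimal usage sketch of a processor like the one above. The checkpoint id is
# the public microsoft/layoutlmv3-base; the image path is an assumption, and the
# built-in OCR path requires pytesseract to be installed.
if __name__ == "__main__":
    from PIL import Image
    from transformers import LayoutLMv3Processor
    processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
    image = Image.open("document.png").convert("RGB")
    encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
    print(sorted(encoding.keys()))  # ['attention_mask', 'bbox', 'input_ids', 'pixel_values']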
| 203 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class XmodConfig ( PretrainedConfig ):
__UpperCAmelCase : Optional[Any] = '''xmod'''
def __init__( self , UpperCamelCase__=3_0522 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-12 , UpperCamelCase__=1 , UpperCamelCase__=0 , UpperCamelCase__=2 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=2 , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=("en_XX",) , UpperCamelCase__=None , **UpperCamelCase__ , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
snake_case : List[Any] = vocab_size
snake_case : List[Any] = hidden_size
snake_case : Union[str, Any] = num_hidden_layers
snake_case : Any = num_attention_heads
snake_case : List[str] = hidden_act
snake_case : Union[str, Any] = intermediate_size
snake_case : int = hidden_dropout_prob
snake_case : Any = attention_probs_dropout_prob
snake_case : Optional[int] = max_position_embeddings
snake_case : Tuple = type_vocab_size
snake_case : List[str] = initializer_range
snake_case : int = layer_norm_eps
snake_case : Optional[Any] = position_embedding_type
snake_case : int = use_cache
snake_case : Dict = classifier_dropout
snake_case : Dict = pre_norm
snake_case : Union[str, Any] = adapter_reduction_factor
snake_case : Any = adapter_layer_norm
snake_case : Optional[int] = adapter_reuse_layer_norm
snake_case : List[Any] = ln_before_adapter
snake_case : str = list(UpperCamelCase__ )
snake_case : int = default_language
class XmodOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
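# A minimal usage sketch (illustrative only, assuming the two classes above are the
# X-MOD configs exported by `transformers`):
#
#     config = XmodConfig(default_language="en_XX")
#     config.save_pretrained("./xmod-config")            # writes config.json
#     reloaded = XmodConfig.from_pretrained("./xmod-config")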
| 203 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        vocab_tokens = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    @tooslow
    def test_pre_tokenization( self ):
        """simple docstring"""
        tokenizer = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' )
        texts = '今天天气真好!'
        jieba_tokens = ['今天', '天气', '真', '好', '!']
        tokens = tokenizer.tokenize(texts )
        self.assertListEqual(tokens , jieba_tokens )
        normalized_text = '今天天气真好!'
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 98_02, 1_49_62, 20_82, 8_31, 2_44]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_jieba_tokens )
        reconstructed_text = tokenizer.decode(input_jieba_tokens )
        self.assertEqual(reconstructed_text , normalized_text )
| 363 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # Fitness = number of characters that already match the target at the same position.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
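# Worked example for the fitness function above (this is the doctest pair used by the
# TheAlgorithms implementation this file appears to derive from): nine of the eleven
# characters already match, so
#     evaluate("Helxo Worlx", "Hello World")  # -> ("Helxo Worlx", 9.0)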
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    # Slice both parents at a random point and swap the tails.
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    # With probability MUTATION_PROBABILITY, replace one random gene of the child.
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append(''.join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'\nGeneration: {generation}'
                f'\nTotal Population:{total_population}'
                f'\nBest score: {population_score[0][1]}'
                f'\nBest string: {population_score[0][0]}')
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
    )
    genes_list = list(
        ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
        'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
    )
| 309 | 0 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = """docs/source/en/_toctree.yml"""
def clean_model_doc_toc( model_doc ):
    """Removes duplicate entries from the model ToC and sorts models alphabetically by title."""
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f"""{duplicate_key} is present several times in the documentation table of content at """
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others." )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s: s["title"].lower() )
def check_model_doc( overwrite=False ):
    with open(PATH_TO_TOC , encoding="utf-8" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC , "w" , encoding="utf-8" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
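# Typical invocations, inferred from the argument parser above (paths assume the
# Transformers repository layout):
#
#     python utils/check_doc_toc.py                      # fail if the model ToC is unsorted
#     python utils/check_doc_toc.py --fix_and_overwrite  # rewrite _toctree.yml in place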
| 59 |
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types ) -> List[Any]:
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("""Text input""" )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
        elif input_type == "audio":
            inputs.append(torch.ones(3000 ) )
        elif isinstance(input_type , list ):
            inputs.append(create_inputs(input_type ) )
        else:
            raise ValueError(F'''Invalid type requested: {input_type}''' )
    return inputs
def output_types(outputs ) -> List[str]:
    output_types = []
    for output in outputs:
        if isinstance(output , (str, AgentText) ):
            output_types.append("""text""" )
        elif isinstance(output , (Image.Image, AgentImage) ):
            output_types.append("""image""" )
        elif isinstance(output , (torch.Tensor, AgentAudio) ):
            output_types.append("""audio""" )
        else:
            raise ValueError(F'''Invalid output: {output}''' )
    return output_types
@is_tool_test
class ToolTesterMixin:
    '''simple docstring'''
    def test_inputs_outputs( self ):
        """simple docstring"""
        self.assertTrue(hasattr(self.tool , """inputs""" ) )
        self.assertTrue(hasattr(self.tool , """outputs""" ) )
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , list ):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )
    def test_call( self ):
        """simple docstring"""
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs ) , self.tool.outputs )
    def test_common_attributes( self ):
        """simple docstring"""
        self.assertTrue(hasattr(self.tool , """description""" ) )
        self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
        self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
    def test_agent_types_outputs( self ):
        """simple docstring"""
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
        for output, output_type in zip(outputs , self.tool.outputs ):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output , agent_type ) )
    def test_agent_type_checking( self ):
        """simple docstring"""
        inputs = create_inputs(self.tool.inputs )
        _inputs = []
        for _input, input_type in zip(inputs , self.tool.inputs ):
            if isinstance(input_type , list ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        outputs = self.tool(*inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
| 132 | 0 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def tpu_command_parser( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser("""tpu-config""" , description=_description )
    else:
        parser = argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        """Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" )
    config_args.add_argument(
        """--config_file""" , type=str , default=None , help="""Path to the config file to use for accelerate.""" , )
    config_args.add_argument(
        """--tpu_name""" , default=None , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , )
    config_args.add_argument(
        """--tpu_zone""" , default=None , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , )
    pod_args = parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" )
    pod_args.add_argument(
        """--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , )
    pod_args.add_argument(
        """--command_file""" , default=None , help="""The path to the file containing the commands to run on the pod on startup.""" , )
    pod_args.add_argument(
        """--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , )
    pod_args.add_argument(
        """--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , )
    pod_args.add_argument(
        """--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" , )
    pod_args.add_argument(
        """--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def tpu_command_launcher( args ):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = """git+https://github.com/huggingface/accelerate.git"""
    elif args.accelerate_version == "latest":
        args.accelerate_version = """accelerate -U"""
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"
    if not args.command_file and not args.command:
        raise ValueError("""You must specify either a command file or a command to run on the pod.""" )
    if args.command_file:
        with open(args.command_file , """r""" ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["""cd /usr/share"""]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = """; """.join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["""gcloud"""]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd )}" )
        return
    subprocess.run(cmd )
    print("""Successfully setup pod.""" )
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
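# Example invocation, inferred from the parser defined above (the TPU name and zone
# are placeholders, not real resources):
#
#     accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#         --command "pip install -r requirements.txt" --command "python train.py" --debug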
| 170 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)
DatasetType = TypeVar('''DatasetType''', Dataset, IterableDataset)
def interleave_datasets(
    datasets , probabilities=None , seed=None , info=None , split=None , stopping_strategy="first_exhausted" , ):
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError("""Unable to interleave an empty list of datasets.""" )
    for i, dataset in enumerate(datasets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        """is an empty dataset dictionary.""" )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset )}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']" )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}." )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
def concatenate_datasets( dsets , info=None , split=None , axis=0 , ):
    if not dsets:
        raise ValueError("""Unable to concatenate an empty list of datasets.""" )
    for i, dataset in enumerate(dsets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        """is an empty dataset dictionary.""" )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset )}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']" )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}." )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets , info=info , split=split , axis=axis )
    else:
        return _concatenate_iterable_datasets(dsets , info=info , split=split , axis=axis )
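# Illustrative usage (dataset objects are placeholders; the keyword arguments match
# the two functions defined above):
#
#     mixed = interleave_datasets([ds_a, ds_b], probabilities=[0.8, 0.2], seed=42,
#                                 stopping_strategy="all_exhausted")
#     combined = concatenate_datasets([ds_a, ds_b], axis=0)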
| 170 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n    Examples:\n        ```py\n        >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n        >>> import torch\n\n        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n        >>> pipe_prior.to(\"cuda\")\n        >>> prompt = \"red cat, 4k photo\"\n        >>> out = pipe_prior(prompt)\n        >>> image_emb = out.image_embeds\n        >>> zero_image_emb = out.negative_image_embeds\n        >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n        >>> pipe.to(\"cuda\")\n        >>> image = pipe(\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=zero_image_emb,\n        ...     height=768,\n        ...     width=768,\n        ...     num_inference_steps=50,\n        ... ).images\n        >>> image[0].save(\"cat.png\")\n        ```\n"
def downscale_height_and_width(height: int , width: int , scale_factor: int = 8 ):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
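# Sanity check for the helper above: with the MoVQ scale factor of 8, a 768x768
# request maps to a 768 // 8**2 = 12 cell grid per side, reported as 12 * 8 = 96:
#     downscale_height_and_width(768, 768, 8)  # -> (96, 96)
#     downscale_height_and_width(700, 700, 8)  # -> (88, 88), rounded up to a full cell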
class KandinskyVaaPipeline( DiffusionPipeline ):
    def __init__( self , unet: UNetaDConditionModel , scheduler: DDPMScheduler , movq: VQModel , ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        '''simple docstring'''
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        '''Offloads all models to CPU using accelerate, significantly reducing memory usage.'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        device = torch.device(F"cuda:{gpu_id}" )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        '''Offloads each model to CPU, moving it to GPU only while its forward pass runs.'''
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
        device = torch.device(F"cuda:{gpu_id}" )
        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        '''simple docstring'''
        if not hasattr(self.unet , """_hf_hook""" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , """_hf_hook""" )
                and hasattr(module._hf_hook , """execution_device""" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image_embeds , negative_image_embeds , height=512 , width=512 , num_inference_steps=100 , guidance_scale=4.0 , num_images_per_prompt=1 , generator=None , latents=None , output_type="pil" , return_dict=True , ):
        '''simple docstring'''
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height , width = downscale_height_and_width(height , width , self.movq_scale_factor )
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"""image_embeds""": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                _ , variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , """variance_type""" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )["""sample"""]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 109 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("cpu")
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def get_expected_output( swiftformer_name ):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0, 8.8_6_8_5e-0_1, 2.4_3_6_0e-0_1] )
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9_6_3_6e-0_1, 2.3_4_7_8e-0_1, -1.6_9_6_3e0_0, -1.7_3_8_1e0_0, -8.6_3_3_7e-0_1] )
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2_7_6_8e-0_1, -4.7_4_2_9e-0_1, -1.0_8_9_7e0_0, -1.0_2_4_8e0_0, 3.5_5_2_3e-0_2] )
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5_3_3_0e-0_1, 2.4_2_1_1e-0_1, -6.0_1_8_5e-0_1, -8.2_7_8_9e-0_1, -6.0_4_4_6e-0_2] )
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys( state_dict ):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(""".pwconv""" , """.point_wise_conv""" )
        if ".dwconv" in k:
            k_new = k_new.replace(""".dwconv""" , """.depth_wise_conv""" )
        if ".Proj." in k:
            k_new = k_new.replace(""".Proj.""" , """.proj.""" )
        if "patch_embed" in k_new:
            k_new = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" )
        if "network" in k_new:
            ls = k_new.split(""".""" )
            if ls[2].isdigit():
                k_new = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
            else:
                k_new = k_new.replace("""network""" , """swiftformer.encoder.network""" )
        rename_keys.append((k, k_new) )
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint( swiftformer_name , pytorch_dump_folder_path , original_ckpt ):
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("""https""" ):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt , map_location="""cpu""" , check_hash=True )
        else:
            checkpoint = torch.load(original_ckpt , map_location="""cpu""" )
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config ).eval()
    hf_model.load_state_dict(state_dict )
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
    inputs = processor(images=image , return_tensors="""pt""" )
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name )
    hf_logits = hf_model(inputs["""pixel_values"""] ).logits
    assert hf_logits.shape == torch.Size([1, 1000] )
    assert torch.allclose(hf_logits[0, 0:5] , timm_logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 109 | 1 |
import random
from typing import Any
def fisher_yates_shuffle(data: list ) -> list[Any]:
    # Swap two uniformly chosen positions once per element, shuffling in place.
    # (Note: the textbook Fisher-Yates variant instead draws the second index from
    # the not-yet-shuffled prefix only.)
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
    return data
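# Reproducible example (seed value is arbitrary; the exact output depends on the RNG
# state at call time):
#     random.seed(0)
#     fisher_yates_shuffle([1, 2, 3, 4])  # -> a permutation such as [4, 1, 3, 2]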
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 210 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
logger = logging.get_logger(__name__)
class LayoutLMvaFeatureExtractor( LayoutLMvaImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use LayoutLMv2ImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs ) | 210 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class TextaTextGenerationPipelineTests( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline(self , model , tokenizer , processor ):
        """simple docstring"""
        generator = TextaTextGenerationPipeline(model=model , tokenizer=tokenizer )
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test(self , generator , _ ):
        """simple docstring"""
        outputs = generator("Something there" )
        self.assertEqual(outputs , [{"generated_text": ANY(str )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there" ) )
        outputs = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{"generated_text": ANY(str )}, {"generated_text": ANY(str )}],
                [{"generated_text": ANY(str )}, {"generated_text": ANY(str )}],
            ] , )
        outputs = generator(
            ["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{"generated_text": ANY(str )}, {"generated_text": ANY(str )}],
                [{"generated_text": ANY(str )}, {"generated_text": ANY(str )}],
            ] , )
        with self.assertRaises(ValueError ):
            generator(4 )
    @require_torch
    def test_small_model_pt(self ):
        """simple docstring"""
        generator = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="pt" )
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there" , do_sample=False )
        self.assertEqual(outputs , [{"generated_text": ""}] )
        num_return_sequences = 3
        outputs = generator(
            "Something there" , num_return_sequences=num_return_sequences , num_beams=num_return_sequences , )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs , target_outputs )
        outputs = generator("This is a test" , do_sample=True , num_return_sequences=2 , return_tensors=True )
        self.assertEqual(
            outputs , [
                {"generated_token_ids": ANY(torch.Tensor )},
                {"generated_token_ids": ANY(torch.Tensor )},
            ] , )
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"] , do_sample=True , num_return_sequences=2 , batch_size=2 , return_tensors=True , )
        self.assertEqual(
            outputs , [
                [
                    {"generated_token_ids": ANY(torch.Tensor )},
                    {"generated_token_ids": ANY(torch.Tensor )},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor )},
                    {"generated_token_ids": ANY(torch.Tensor )},
                ],
            ] , )
    @require_tf
    def test_small_model_tf(self ):
        """simple docstring"""
        generator = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="tf" )
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there" , do_sample=False )
        self.assertEqual(outputs , [{"generated_text": ""}] )
| 65 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler , num_steps=10 ):
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler , num_steps=10 ):
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , "schedule.bin" )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class OptimizationTest( unittest.TestCase ):
    """simple docstring"""
    def assertListAlmostEqual( self , list1 , list2 , tol ):
        self.assertEqual(len(list1) , len(list2))
        for a, b in zip(list1 , list2):
            self.assertAlmostEqual(a , b , delta=tol)
    def test_adam_w( self ):
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w , target)
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2)
    def test_adafactor( self ):
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=None , weight_decay=0.0 , relative_step=False , scale_parameter=False , warmup_init=False , )
        for _ in range(1000):
            loss = criterion(w , target)
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2)
@require_torch
class ScheduleInitTest( unittest.TestCase ):
    """simple docstring"""
    m = nn.Linear(50 , 50 ) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual( self , list1 , list2 , tol , msg=None ):
        self.assertEqual(len(list1) , len(list2))
        for a, b in zip(list1 , list2):
            self.assertAlmostEqual(a , b , delta=tol , msg=msg)
    def test_schedulers( self ):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs , expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer , **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]) , 1)
            lrs_1 = unwrap_schedule(scheduler , self.num_steps)
            self.assertListAlmostEqual(
                lrs_1 , expected_learning_rates , tol=1E-2 , msg=F"failed for {scheduler_func} in normal scheduler" , )
            scheduler = scheduler_func(self.optimizer , **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler) # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler , self.num_steps)
            self.assertListEqual(lrs_1 , lrs_2 , msg=F"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """simple docstring"""
    def __init__( self , fn ):
        self.fn = fn
    def __call__( self , *args , **kwargs ):
        return self.fn(*args , **kwargs)
    @classmethod
    def wrap_scheduler( cls , scheduler ):
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas))
| 13 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Salesforce/blip-vqa-base""": """https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json""",
"""Salesforce/blip-vqa-capfit-large""": (
"""https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"""
),
"""Salesforce/blip-image-captioning-base""": (
"""https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"""
),
"""Salesforce/blip-image-captioning-large""": (
"""https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"""
),
"""Salesforce/blip-itm-base-coco""": """https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json""",
"""Salesforce/blip-itm-large-coco""": """https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json""",
"""Salesforce/blip-itm-base-flikr""": """https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json""",
"""Salesforce/blip-itm-large-flikr""": (
"""https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"""
),
}
class BlipTextConfig( PretrainedConfig ):
    model_type = 'blip_text_model'
    def __init__( self , vocab_size=3_0524 , hidden_size=768 , encoder_hidden_size=768 , intermediate_size=3072 , projection_dim=768 , num_hidden_layers=12 , num_attention_heads=8 , max_position_embeddings=512 , hidden_act="gelu" , layer_norm_eps=1e-12 , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , bos_token_id=3_0522 , eos_token_id=2 , pad_token_id=0 , sep_token_id=102 , is_decoder=True , use_cache=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , sep_token_id=sep_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from BlipConfig
        if config_dict.get('model_type' ) == "blip":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict , **kwargs )
class BlipVisionConfig( PretrainedConfig ):
    model_type = 'blip_vision_model'
    def __init__( self , hidden_size=768 , intermediate_size=3072 , projection_dim=512 , num_hidden_layers=12 , num_attention_heads=12 , image_size=384 , patch_size=16 , hidden_act="gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=1e-10 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get('model_type' ) == "blip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict , **kwargs )
class BlipConfig( PretrainedConfig ):
    model_type = 'blip'
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=512 , logit_scale_init_value=2.65_92 , image_text_hidden_size=256 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
        if vision_config is None:
            vision_config = {}
            logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
        self.text_config = BlipTextConfig(**text_config )
        self.vision_config = BlipVisionConfig(**vision_config )
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size
    @classmethod
    def from_text_vision_configs( cls , text_config: BlipTextConfig , vision_config: BlipVisionConfig , **kwargs ):
        '''simple docstring'''
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
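# Minimal usage sketch (assumes the three classes above mirror `transformers`' Blip
# configuration API):
#     config = BlipConfig()                   # sub-configs are filled with defaults
#     config.text_config.encoder_hidden_size  # == config.vision_config.hidden_size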
| 356 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
"""simple docstring"""
assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
# _test_patching has "open" in its globals
assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
"""simple docstring"""
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
pass
def test_patch_submodule_missing_builtin():
"""simple docstring"""
    mock = "__test_patch_submodule_missing_builtin_mock__"
# _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
"""simple docstring"""
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
"""simple docstring"""
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
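
# Illustrative sketch (not part of the original test module): patch_submodule
# temporarily swaps any attribute reachable through a dotted path, e.g.
#
#     with patch_submodule(_test_patching, "os.path.join", lambda *p: "/mocked"):
#         assert _test_patching.os.path.join("a", "b") == "/mocked"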
| 114 | 0 |
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Compute the product term u * (u - 1) * ... * (u - p + 1)."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
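# Worked example (illustrative): with u = 0.5, ucal(0.5, 3) evaluates
# 0.5 * (0.5 - 1) * (0.5 - 2) = 0.375, the product term multiplying the
# third forward difference in the Newton formula above.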
| 317 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """Approximate the solution of y' = ode_func(x, y) with the explicit Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
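    # Illustrative usage (an assumption, not part of the original file): integrate
    # y' = y from x = 0 to x = 1 with step 0.01; the last value approximates e.
    ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(f"Euler estimate of e: {ys[-1]:.5f} (exact: {np.e:.5f})")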
| 309 | 0 |
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="gelu", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )
    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 215 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
lowerCamelCase_ : str = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/rembert""": 2_5_6,
}
lowerCamelCase_ : Optional[Any] = """▁"""
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs, ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 215 | 1 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"num_train_timesteps": 151,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
def strabool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
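
# Illustrative usage (not part of the original script): strabool normalizes
# CLI-style booleans, e.g. strabool("yes") -> True and strabool("0") -> False;
# passing an actual bool returns it unchanged.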
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
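
# Example of the remapping above (illustrative): with old_prefix
# "input_blocks.1.0" and new_prefix "down_blocks.0.resnets.0", the tensor at
# "input_blocks.1.0.in_layers.0.weight" is stored under
# "down_blocks.0.resnets.0.norm1.weight" in the converted state dict.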
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    current_layer = 0
    up_block_types = unet_config["up_block_types"]
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
_lowercase : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
_lowercase : List[str] =parser.parse_args()
_lowercase : Any =strabool(args.class_cond)
_lowercase : List[str] =os.path.basename(args.unet_path)
print(f'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
_lowercase : Any =IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_lowercase : Union[str, Any] =LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
_lowercase : Optional[int] =TEST_UNET_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
_lowercase : List[Any] =None
_lowercase : List[str] =con_pt_to_diffuser(args.unet_path, unet_config)
_lowercase : Optional[Any] =UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
_lowercase : List[str] =CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
_lowercase : List[str] =CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_lowercase : Optional[int] =CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
_lowercase : List[Any] =CMStochasticIterativeScheduler(**scheduler_config)
_lowercase : Tuple =ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 170 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000
    return sum_of_digits_squared
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 1000_0000
CHAINS[0] = True  # the chain starting at 1 ends in 1
CHAINS[57] = False  # the chain starting at 58 ends in 89


def chain(number: int) -> bool:
    """Return True if the chain starting at ``number`` arrives at 1."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 1000_0000) -> int:
    """Count the starting numbers below ``number`` whose chain arrives at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    # entries marked False above are the chains that end in 89
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{solution() = }')
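# Worked example (illustrative): starting from 44 the chain is
# 44 -> 4**2 + 4**2 = 32 -> 13 -> 10 -> 1, so chain(44) is True;
# starting from 85 the chain is 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> ... -> 89,
# so chain(85) is False.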
| 170 | 1 |
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz from ``number`` up to and including ``iterations``."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")
    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
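    # Illustrative usage (an assumption, not part of the original file):
    print(fizz_buzz(1, 15))  # 1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz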
| 131 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_safe_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None)
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images
        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4_003_660_346
        guidance_scale = 7
        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2_000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2_734_971_755
        guidance_scale = 7
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2_000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1_044_355_234
        guidance_scale = 12
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2_000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 131 | 1 |
import os
def solution():
    """Find the greatest product of four adjacent numbers (in any direction) in the grid."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])
    maximum = 0
    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
    print(solution())
| 210 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Optional[int] = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 210 | 1 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 366 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=256, semantic_loss_ignore_index=255, **kwargs, ):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.", FutureWarning, )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
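
# Minimal usage sketch (an illustration added here, not part of the original
# module): the default configuration describes a MiT-b0-style encoder.
if __name__ == "__main__":
    config = SegformerConfig()
    print(config.num_encoder_blocks, config.hidden_sizes)  # 4 [32, 64, 160, 256]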
| 146 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n    >>> from diffusers.utils import load_image\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior.to("cuda")\n\n    >>> prompt = "A red cartoon frog, 4k"\n    >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n    >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n    ... )\n    >>> pipe.to("cuda")\n\n    >>> init_image = load_image(\n    ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n    ...     "/kandinsky/frog.png"\n    ... )\n\n    >>> image = pipe(\n    ...     image=init_image,\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=100,\n    ...     strength=0.2,\n    ... ).images\n\n    >>> image[0].save("red_frog.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
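
# Worked example (illustrative): for a 768x768 request with the default
# scale_factor = 8, 768 // 64 = 12 exactly, so the function returns
# (12 * 8, 12 * 8) = (96, 96) -- the spatial size of the movq latents.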
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
@replace_example_docstring(SCREAMING_SNAKE_CASE )
    def __call__(
        self,
        image_embeds,
        image,
        negative_image_embeds,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator=None,
        output_type: str = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]

        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
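# Hedged illustration (not part of the pipeline above): the classifier-free
# guidance update used inside the denoising loop, shown on dummy tensors.
# Assumes torch is installed; the guidance scale value is arbitrary.
import torch

uncond, text = torch.zeros(1, 4, 8, 8), torch.ones(1, 4, 8, 8)
guided = uncond + 4.0 * (text - uncond)  # guidance_scale * (text - uncond)
assert torch.allclose(guided, 4.0 * torch.ones(1, 4, 8, 8))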
| 3 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimensions for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
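# Hedged usage sketch (illustrative, not from the original file); assumes the
# `transformers` package is installed and configs are cheap to instantiate.
from transformers import DetrConfig, MaskFormerConfig, SwinConfig

maskformer_config = MaskFormerConfig.from_backbone_and_decoder_configs(
    backbone_config=SwinConfig(), decoder_config=DetrConfig()
)
print(maskformer_config.mask_feature_size)  # 256 by default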
| 114 | 0 |
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCAmelCase_ = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray, output_size, keep_aspect_ratio: bool, multiple: int
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
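# Hedged mini-demo (not from the original file) of the aspect-ratio rule above:
# resizing a 480x640 image toward 384x384 with keep_aspect_ratio picks the
# scale closer to 1 (the height factor 0.8), giving 384x512 before snapping.
scale_height, scale_width = 384 / 480, 384 / 640
if abs(1 - scale_width) < abs(1 - scale_height):
    scale_height = scale_width  # fit width
else:
    scale_width = scale_height  # fit height
assert (round(scale_height * 480), round(scale_width * 640)) == (384, 512)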
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean=None,
        image_std=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # note the parentheses: without them the original `and`/`or` mix
        # short-circuited incorrectly
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
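# Hedged usage sketch (illustrative); assumes `transformers` and `Pillow`
# are installed.
from PIL import Image
from transformers import DPTImageProcessor

demo_processor = DPTImageProcessor(keep_aspect_ratio=True, ensure_multiple_of=32)
demo_inputs = demo_processor(images=Image.new("RGB", (640, 480)), return_tensors="np")
print(demo_inputs["pixel_values"].shape)  # e.g. (1, 3, 384, 384)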
| 365 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
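# Hedged usage sketch of the inspected APIs (kept commented because it
# requires network access to the Hugging Face Hub):
# from datasets import get_dataset_config_names, get_dataset_split_names
#
# print(get_dataset_config_names("squad"))               # e.g. ["plain_text"]
# print(get_dataset_split_names("squad", "plain_text"))  # ["train", "validation"]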
| 332 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
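# Hedged mini-sketch (standard library only) of the same subcommand pattern:
# each command registers a subparser and stores a callable on `args.func`.
import argparse

demo_parser = argparse.ArgumentParser("demo-cli")
demo_sub = demo_parser.add_subparsers()
env_parser = demo_sub.add_parser("env")
env_parser.set_defaults(func=lambda args: print("environment info"))
demo_args = demo_parser.parse_args(["env"])
demo_args.func(demo_args)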
| 215 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
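# Hedged invocation example (file paths are placeholders, not real files):
# python convert_rembert_tf_checkpoint_to_pytorch.py \
#   --tf_checkpoint_path ./rembert/model.ckpt \
#   --rembert_config_file ./rembert/config.json \
#   --pytorch_dump_path ./rembert/pytorch_model.bin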
| 215 | 1 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """Cleans a table-of-content list by removing duplicate entries and sorting titles alphabetically."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("The doc list has two 'overview' docs, which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
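# Hedged toy run (illustrative) of the dedup-and-sort idea above on an
# in-memory toc list; assumes clean_doc_toc is imported or defined as above.
toy_toc = [
    {"local": "overview", "title": "Overview"},
    {"local": "ddim", "title": "DDIM"},
    {"local": "ddim", "title": "DDIM"},
    {"local": "ddpm", "title": "DDPM"},
]
assert [d["title"] for d in clean_doc_toc(toy_toc)] == ["Overview", "DDIM", "DDPM"]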
| 146 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 146 | 1 |
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string with Ascii85."""
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85-encoded bytes back to a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
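# Hedged round-trip check for the helpers above.
sample = "some text to encode"
assert base85_decode(base85_encode(sample)) == sample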
| 131 |
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )

        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
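# Hedged mini-sketch (not the processor's real implementation): parsing flat
# <s_key>value</s_key> tags with a regex; nested tags are not handled here.
import re


def parse_flat_tags(sequence: str) -> dict:
    return {m.group(1): m.group(2) for m in re.finditer(r"<s_(\w+)>([^<]*)</s_\1>", sequence)}


assert parse_flat_tags("<s_name>John Doe</s_name><s_age>99</s_age>") == {"name": "John Doe", "age": "99"}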
| 131 | 1 |
"""simple docstring"""
def count_divisors(n: int) -> int:
    """Count divisors via the prime factorization: if n = p1^a1 * ... * pk^ak,
    the divisor count is (a1 + 1) * ... * (ak + 1)."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Project Euler 12: first triangle number with more than 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
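# Hedged sanity check: 28 = 2^2 * 7 has (2 + 1) * (1 + 1) = 6 divisors.
assert count_divisors(28) == 6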
| 302 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 302 | 1 |
def count_inversions_bf(arr):
    """Count inversions by brute force in O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions with a merge-sort style divide and conquer in O(n log n)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted arrays, counting pairs (x in p, y in q) with x > y."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i < k <= len(p).
            # These are all inversions. The claim emerges from the
            # property that p is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this array has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversions (a sorted arr_a)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
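# Hedged sanity check: [3, 1, 2] contains the inversions (3, 1) and (3, 2).
assert count_inversions_bf([3, 1, 2]) == 2
assert count_inversions_recursive([3, 1, 2])[1] == 2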
| 13 |
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` on [a, b] by repeated interval halving."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of a or b is a root of the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if neither is a root and both values share the same sign,
        # this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the precision reaches 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
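# Hedged extra example: the positive root of x^2 - 2 on [1, 2] is sqrt(2).
assert abs(bisection(lambda x: x * x - 2, 1, 2) - 2**0.5) < 1e-6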
| 146 | 0 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
pass
def test_patch_submodule_missing_builtin():
    # builtins should always be mocked even if they're not in the globals,
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
pass
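# Hedged standard-library sketch of the same idea: temporarily swapping an
# attribute and restoring it on exit, like patch_submodule does for modules.
from contextlib import contextmanager


@contextmanager
def patch_attribute(obj, name, new_value):
    old_value = getattr(obj, name)
    setattr(obj, name, new_value)
    try:
        yield
    finally:
        setattr(obj, name, old_value)


import os as _os_demo

with patch_attribute(_os_demo.path, "join", "__mock__"):
    assert _os_demo.path.join == "__mock__"
assert callable(_os_demo.path.join)  # restored after the context exits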
| 20 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"junnyu/roformer_chinese_small": 15_36,
"junnyu/roformer_chinese_base": 15_36,
"junnyu/roformer_chinese_char_small": 5_12,
"junnyu/roformer_chinese_char_base": 5_12,
"junnyu/roformer_small_discriminator": 1_28,
"junnyu/roformer_small_generator": 1_28,
}
PRETRAINED_INIT_CONFIGURATION = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 20 | 1 |
from __future__ import annotations
def all_unique(seq) -> bool:
    return len(set(seq)) == len(seq)
if __name__ == "__main__":
import doctest
doctest.testmod()
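# Hedged sanity checks for the helper above.
assert all_unique([1, 2, 3]) is True
assert all_unique([1, 2, 2]) is False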
| 196 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
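# Hedged mini-sketch (PEP 562 style) of the idea behind _LazyModule: resolve
# an attribute only on first access instead of importing it eagerly. This is
# illustrative only and not wired into the module above.
def __getattr__(name):
    if name == "heavy_helper":
        import math  # stand-in for an expensive import

        return math.sqrt
    raise AttributeError(name)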
| 332 | 0 |
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset into features and target
    return (data["data"], data["target"])


def xgboost(features, target):
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)

    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier on the test set
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
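# Hedged toy fit (kept commented; assumes `xgboost` is installed): two
# trivially separable classes, values illustrative.
# from xgboost import XGBClassifier
#
# clf = XGBClassifier(n_estimators=5)
# clf.fit([[0.0], [0.1], [1.0], [1.1]], [0, 0, 1, 1])
# print(clf.predict([[0.05], [1.05]]))  # expected: [0 1]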
| 370 |
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
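# Hedged demo of the version gate used above, using the `packaging` library.
from packaging import version

assert version.parse("0.17.0") >= version.parse("0.16.1")
assert version.parse(version.parse("0.17.0.dev0").base_version) == version.parse("0.17.0")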
| 161 | 0 |
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()

    return result


def _validation(
    observations_space,
    states_space,
    initial_probabilities,
    transition_probabilities,
    emission_probabilities,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space,
    states_space,
    initial_probabilities,
    transition_probabilities,
    emission_probabilities,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space, states_space) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
from doctest import testmod
testmod()
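# Hedged worked example (the classic healthy/fever HMM from the Viterbi
# literature); the expected decoding is Healthy, Healthy, Fever.
observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
start_p = {"Healthy": 0.6, "Fever": 0.4}
trans_p = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emit_p = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
assert viterbi(observations, states, start_p, trans_p, emit_p) == ["Healthy", "Healthy", "Fever"]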
| 146 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 146 | 1 |
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    """A number n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is an integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Project Euler 44: smallest difference of a pentagonal pair whose sum
    and difference are both pentagonal."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(f"""{solution() = }""")
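# Hedged sanity checks: 1, 5 and 12 are pentagonal numbers, 4 is not.
assert is_pentagonal(1) and is_pentagonal(5) and is_pentagonal(12)
assert not is_pentagonal(4)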
| 21 |
'''simple docstring'''
def solution() -> int:
    """Project Euler 40: product of the digits d_1, d_10, ..., d_1000000 of
    the Champernowne constant 0.123456789101112..."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
print(solution())
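# Hedged spot check on the first digits "123456789101112...": the 12th digit is 1.
assert "".join(str(i) for i in range(1, 16))[11] == "1"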
| 21 | 1 |
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str):
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
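# Hedged cross-check of the expected completions for the "de" prefix.
assert [w for w in words if w.startswith("de")] == ["depart", "detergent", "deer", "deal"]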
| 302 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Optional[Any] ='albert'
def __init__( self : Optional[Any] , __lowercase : Union[str, Any]=30000 , __lowercase : List[str]=128 , __lowercase : Optional[Any]=4096 , __lowercase : Dict=12 , __lowercase : Any=1 , __lowercase : Optional[Any]=64 , __lowercase : Any=16384 , __lowercase : Any=1 , __lowercase : Union[str, Any]="gelu_new" , __lowercase : List[str]=0 , __lowercase : int=0 , __lowercase : Dict=512 , __lowercase : str=2 , __lowercase : List[str]=0.02 , __lowercase : Union[str, Any]=1E-12 , __lowercase : int=0.1 , __lowercase : Any="absolute" , __lowercase : Optional[int]=0 , __lowercase : Dict=2 , __lowercase : Optional[Any]=3 , **__lowercase : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
__a = vocab_size
__a = embedding_size
__a = hidden_size
__a = num_hidden_layers
__a = num_hidden_groups
__a = num_attention_heads
__a = inner_group_num
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = classifier_dropout_prob
__a = position_embedding_type
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
__a = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__a = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
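# Editor's note, hedged: the defaults above (embedding_size=128 against
# hidden_size=4096) reflect ALBERT's factorized embedding: projecting a small
# embedding up to the hidden size costs far fewer parameters than a full
# vocab-by-hidden table. Back-of-the-envelope with the config's own numbers:
vocab_size, embedding_size, hidden_size = 30000, 128, 4096
factorized = vocab_size * embedding_size + embedding_size * hidden_size
full = vocab_size * hidden_size
assert factorized < full  # ~4.36M vs ~122.88M embedding parameters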
| 302 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCamelCase = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
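# Editor's sketch, hedged: _LazyModule defers the heavy torch/TF imports
# until an attribute is first touched. The core idiom is module-level
# __getattr__ (PEP 562); the table below is illustrative, not the real
# transformers machinery, which also handles caching and TYPE_CHECKING:
import importlib

_lazy_table = {"XLMModel": "transformers.models.xlm.modeling_xlm"}

def __getattr__(name):  # called only for names not found in the module
    if name in _lazy_table:
        return getattr(importlib.import_module(_lazy_table[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")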
| 335 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__(self , *args , **kwargs ) -> None:
"""simple docstring"""
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , FutureWarning , )
super().__init__(*args , **kwargs )
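# Editor's sketch of the same deprecation pattern in isolation (class names
# here are hypothetical): the alias only warns, then defers to the
# replacement class.
import warnings

class _NewProcessor:  # stand-in for the replacement class
    pass

class _OldProcessor(_NewProcessor):  # deprecated alias
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "_OldProcessor is deprecated; use _NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)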
| 335 | 1 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __snake_case ( lowerCAmelCase , unittest.TestCase ):
_a : Optional[int]= BioGptTokenizer
_a : Optional[Any]= False
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
lowercase : int = dict(zip(snake_case ,range(len(snake_case ) ) ) )
lowercase : Union[str, Any] = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
lowercase : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase : Tuple = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ) as fp:
fp.write(json.dumps(snake_case ) )
with open(self.merges_file ,"""w""" ) as fp:
fp.write("""\n""".join(snake_case ) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : Optional[int] = """lower newer"""
lowercase : int = """lower newer"""
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Union[str, Any] = BioGptTokenizer(self.vocab_file ,self.merges_file )
lowercase : Any = """lower"""
lowercase : Optional[int] = ["""low""", """er</w>"""]
lowercase : Optional[int] = tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case ,snake_case )
lowercase : Optional[int] = tokens + ["""<unk>"""]
lowercase : Optional[Any] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) ,snake_case )
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
lowercase : Dict = tokenizer.encode("""sequence builders""" ,add_special_tokens=snake_case )
lowercase : Any = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=snake_case )
lowercase : List[Any] = tokenizer.build_inputs_with_special_tokens(snake_case )
lowercase : int = tokenizer.build_inputs_with_special_tokens(snake_case ,snake_case )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
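# Editor's sketch, hedged: the merges fixture above encodes ranked BPE rules.
# A simplified greedy merger (one occurrence per pass, unlike real BPE, which
# merges every occurrence of the best pair) already reproduces the expected
# ["low", "er</w>"] split of "lower":
def _bpe_sketch(word, ranks):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break
        i = pairs.index(best)
        symbols[i : i + 2] = [best[0] + best[1]]
    return symbols

_ranks = {("l", "o"): 0, ("lo", "w"): 1, ("e", "r</w>"): 2}
assert _bpe_sketch("lower", _ranks) == ["low", "er</w>"]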
| 20 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __snake_case ( unittest.TestCase ):
_a : Optional[int]= MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Any = hf_hub_download(
repo_id="""nateraw/video-demo""" ,filename="""archery.mp4""" ,repo_type="""dataset""" )
lowercase : List[str] = VideoClassificationPipeline(model=snake_case ,image_processor=snake_case ,top_k=2 )
lowercase : Dict = [
example_video_filepath,
"""https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
]
return video_classifier, examples
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ):
'''simple docstring'''
for example in examples:
lowercase : int = video_classifier(snake_case )
self.assertEqual(
snake_case ,[
{"""score""": ANY(snake_case ), """label""": ANY(snake_case )},
{"""score""": ANY(snake_case ), """label""": ANY(snake_case )},
] ,)
@require_torch
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Any = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
lowercase : str = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} ,crop_size={"""height""": 10, """width""": 10} )
lowercase : List[Any] = pipeline(
"""video-classification""" ,model=snake_case ,feature_extractor=snake_case ,frame_sampling_rate=4 )
lowercase : Dict = hf_hub_download(repo_id="""nateraw/video-demo""" ,filename="""archery.mp4""" ,repo_type="""dataset""" )
lowercase : Any = video_classifier(snake_case ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case ,decimals=4 ) ,[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}] ,)
lowercase : str = video_classifier(
[
video_file_path,
video_file_path,
] ,top_k=2 ,)
self.assertEqual(
nested_simplify(snake_case ,decimals=4 ) ,[
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
] ,)
@require_tf
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
| 20 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase_ : Tuple = {'''vocab_file''': '''sentencepiece.bpe.model'''}
UpperCamelCase_ : Any = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
}
}
UpperCamelCase_ : Union[str, Any] = {
'''camembert-base''': 512,
}
UpperCamelCase_ : Union[str, Any] = '''▁'''
class _a ( snake_case_ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["""input_ids""", """attention_mask"""]
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE="<s>" ,_SCREAMING_SNAKE_CASE="</s>" ,_SCREAMING_SNAKE_CASE="</s>" ,_SCREAMING_SNAKE_CASE="<s>" ,_SCREAMING_SNAKE_CASE="<unk>" ,_SCREAMING_SNAKE_CASE="<pad>" ,_SCREAMING_SNAKE_CASE="<mask>" ,_SCREAMING_SNAKE_CASE=["<s>NOTUSED", "</s>NOTUSED"] ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_snake_case = AddedToken(_SCREAMING_SNAKE_CASE ,lstrip=_SCREAMING_SNAKE_CASE ,rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else mask_token
_snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_SCREAMING_SNAKE_CASE ,eos_token=_SCREAMING_SNAKE_CASE ,unk_token=_SCREAMING_SNAKE_CASE ,sep_token=_SCREAMING_SNAKE_CASE ,cls_token=_SCREAMING_SNAKE_CASE ,pad_token=_SCREAMING_SNAKE_CASE ,mask_token=_SCREAMING_SNAKE_CASE ,additional_special_tokens=_SCREAMING_SNAKE_CASE ,sp_model_kwargs=self.sp_model_kwargs ,**_SCREAMING_SNAKE_CASE ,)
_snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_SCREAMING_SNAKE_CASE ) )
_snake_case = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>)
_snake_case = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
_snake_case = len(self.fairseq_tokens_to_ids )
_snake_case = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
_snake_case = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_snake_case = [self.cls_token_id]
_snake_case = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE ,token_ids_a=_SCREAMING_SNAKE_CASE ,already_has_special_tokens=_SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ) -> List[int]:
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowercase ( self ) -> str:
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def _lowercase ( self ) -> Optional[Any]:
_snake_case = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ) -> List[str]:
return self.sp_model.encode(_SCREAMING_SNAKE_CASE ,out_type=_SCREAMING_SNAKE_CASE )
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ) -> str:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(_SCREAMING_SNAKE_CASE ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(_SCREAMING_SNAKE_CASE )
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
_snake_case = []
_snake_case = ""
_snake_case = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE ) + token
_snake_case = True
_snake_case = []
else:
current_sub_tokens.append(_SCREAMING_SNAKE_CASE )
_snake_case = False
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE )
return out_string.strip()
def __getstate__( self ) -> str:
_snake_case = self.__dict__.copy()
_snake_case = None
return state
def __setstate__( self ,_SCREAMING_SNAKE_CASE ) -> Dict:
_snake_case = d
# for backward compatibility
if not hasattr(self ,"sp_model_kwargs" ):
_snake_case = {}
_snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_snake_case = os.path.join(
_SCREAMING_SNAKE_CASE ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE ,"wb" ) as fi:
_snake_case = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
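# Editor's sketch of the fairseq-offset id scheme used above: four reserved
# control ids, sentencepiece ids shifted past them, and sentencepiece's own
# unk (piece id 0) folded into <unk>. `sp_piece_id` stands in for
# sp_model.PieceToId(token):
_fairseq = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
_offset = len(_fairseq)

def _token_to_id_sketch(token: str, sp_piece_id: int) -> int:
    if token in _fairseq:
        return _fairseq[token]
    if sp_piece_id == 0:  # sentencepiece unk -> fairseq unk
        return _fairseq["<unk>"]
    return _offset + sp_piece_id

assert _token_to_id_sketch("<pad>", 0) == 1
assert _token_to_id_sketch("▁le", 42) == 46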
| 353 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __a ( _UpperCamelCase: Dict , _UpperCamelCase: Optional[int] , _UpperCamelCase: List[str] ) -> Optional[Any]:
"""simple docstring"""
return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def __a ( _UpperCamelCase: List[Any] , _UpperCamelCase: Optional[Any] , _UpperCamelCase: Dict , _UpperCamelCase: Optional[Any]="attention" ) -> Any:
"""simple docstring"""
_snake_case = _snake_case = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
_snake_case = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
_snake_case = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
_snake_case = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
_snake_case = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
_snake_case = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
_snake_case = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
_snake_case = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def __a ( _UpperCamelCase: Tuple , _UpperCamelCase: Optional[int] , _UpperCamelCase: Optional[int] , _UpperCamelCase: Optional[int]=False ) -> List[Any]:
"""simple docstring"""
if split_mlp_wi:
_snake_case = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
_snake_case = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
_snake_case = (wi_a, wi_a)
else:
_snake_case = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
_snake_case = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
def __a ( _UpperCamelCase: Optional[int] , _UpperCamelCase: Dict , _UpperCamelCase: Union[str, Any] , _UpperCamelCase: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def __a ( _UpperCamelCase: dict , *, _UpperCamelCase: int , _UpperCamelCase: bool , _UpperCamelCase: bool = False ) -> str:
"""simple docstring"""
_snake_case = traverse_util.flatten_dict(variables["target"] )
_snake_case = {"/".join(_UpperCamelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_snake_case = "encoder/encoder/mlp/wi_0/kernel" in old
print("Split MLP:" , _UpperCamelCase )
_snake_case = collections.OrderedDict()
# Shared embeddings.
_snake_case = old["token_embedder/embedding"]
# Encoder.
for i in range(_UpperCamelCase ):
# Block i, layer 0 (Self Attention).
_snake_case = tax_layer_norm_lookup(_UpperCamelCase , _UpperCamelCase , "encoder" , "pre_attention_layer_norm" )
_snake_case , _snake_case , _snake_case , _snake_case = tax_attention_lookup(_UpperCamelCase , _UpperCamelCase , "encoder" , "attention" )
_snake_case = layer_norm
_snake_case = k.T
_snake_case = o.T
_snake_case = q.T
_snake_case = v.T
# Block i, layer 1 (MLP).
_snake_case = tax_layer_norm_lookup(_UpperCamelCase , _UpperCamelCase , "encoder" , "pre_mlp_layer_norm" )
_snake_case , _snake_case = tax_mlp_lookup(_UpperCamelCase , _UpperCamelCase , "encoder" , _UpperCamelCase )
_snake_case = layer_norm
if split_mlp_wi:
_snake_case = wi[0].T
_snake_case = wi[1].T
else:
_snake_case = wi.T
_snake_case = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_snake_case = tax_relpos_bias_lookup(
_UpperCamelCase , _UpperCamelCase , "encoder" ).T
_snake_case = old["encoder/encoder_norm/scale"]
if not scalable_attention:
_snake_case = tax_relpos_bias_lookup(
_UpperCamelCase , 0 , "encoder" ).T
_snake_case = tax_relpos_bias_lookup(
_UpperCamelCase , 0 , "decoder" ).T
if not is_encoder_only:
# Decoder.
for i in range(_UpperCamelCase ):
# Block i, layer 0 (Self Attention).
_snake_case = tax_layer_norm_lookup(_UpperCamelCase , _UpperCamelCase , "decoder" , "pre_self_attention_layer_norm" )
_snake_case , _snake_case , _snake_case , _snake_case = tax_attention_lookup(_UpperCamelCase , _UpperCamelCase , "decoder" , "self_attention" )
_snake_case = layer_norm
_snake_case = k.T
_snake_case = o.T
_snake_case = q.T
_snake_case = v.T
# Block i, layer 1 (Cross Attention).
_snake_case = tax_layer_norm_lookup(_UpperCamelCase , _UpperCamelCase , "decoder" , "pre_cross_attention_layer_norm" )
_snake_case , _snake_case , _snake_case , _snake_case = tax_attention_lookup(_UpperCamelCase , _UpperCamelCase , "decoder" , "encoder_decoder_attention" )
_snake_case = layer_norm
_snake_case = k.T
_snake_case = o.T
_snake_case = q.T
_snake_case = v.T
# Block i, layer 2 (MLP).
_snake_case = tax_layer_norm_lookup(_UpperCamelCase , _UpperCamelCase , "decoder" , "pre_mlp_layer_norm" )
_snake_case , _snake_case = tax_mlp_lookup(_UpperCamelCase , _UpperCamelCase , "decoder" , _UpperCamelCase )
_snake_case = layer_norm
if split_mlp_wi:
_snake_case = wi[0].T
_snake_case = wi[1].T
else:
_snake_case = wi.T
_snake_case = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_snake_case = tax_relpos_bias_lookup(_UpperCamelCase , _UpperCamelCase , "decoder" ).T
_snake_case = old["decoder/decoder_norm/scale"]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_snake_case = old["decoder/logits_dense/kernel"].T
return new
def __a ( _UpperCamelCase: Any , _UpperCamelCase: bool ) -> Dict:
"""simple docstring"""
_snake_case = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_snake_case = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_snake_case = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
_snake_case = state_dict["shared.weight"]
return state_dict
def __a ( _UpperCamelCase: str , _UpperCamelCase: List[str] , _UpperCamelCase: Any , _UpperCamelCase: str , _UpperCamelCase: List[Any] ) -> Dict:
"""simple docstring"""
_snake_case = checkpoints.load_tax_checkpoint(_UpperCamelCase )
_snake_case = convert_tax_to_pytorch(
_UpperCamelCase , num_layers=config.num_layers , is_encoder_only=_UpperCamelCase , scalable_attention=_UpperCamelCase )
_snake_case = make_state_dict(_UpperCamelCase , _UpperCamelCase )
model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
def __a ( _UpperCamelCase: Union[str, Any] , _UpperCamelCase: Union[str, Any] , _UpperCamelCase: Optional[Any] , _UpperCamelCase: bool = False , _UpperCamelCase: bool = False , ) -> Dict:
"""simple docstring"""
_snake_case = MTaConfig.from_json_file(_UpperCamelCase )
print(F"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_snake_case = UMTaEncoderModel(_UpperCamelCase )
else:
_snake_case = UMTaForConditionalGeneration(_UpperCamelCase )
# Load weights from the T5X checkpoint
load_tax_weights_in_ta(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(_UpperCamelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(_UpperCamelCase )
print("Done" )
if __name__ == "__main__":
UpperCamelCase_ : Any = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
UpperCamelCase_ : Union[str, Any] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
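# Editor's sketch (numpy only, shapes illustrative): the attention lookup
# above flattens each (d_model, n_heads, head_dim) T5X kernel into the 2-D
# (d_model, inner_dim) matrix PyTorch expects, and the caller transposes it:
import numpy as np

d_model, n_heads, head_dim = 8, 2, 3
k_tmp = np.zeros((d_model, n_heads, head_dim), dtype=np.float32)
k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
assert k.shape == (8, 6) and k.T.shape == (6, 8)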
| 142 | 0 |
"""simple docstring"""
def _A ( UpperCamelCase_ : int, UpperCamelCase_ : int) -> str:
'''simple docstring'''
if not isinstance(UpperCamelCase_, UpperCamelCase_):
raise ValueError("iterations must be defined as integers")
if not isinstance(UpperCamelCase_, UpperCamelCase_) or not number >= 1:
raise ValueError(
"starting number must be an integer and be more than 0")
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")
__lowercase = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(UpperCamelCase_)
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
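# Editor's sketch, hedged: reading the obfuscated signature above as
# (number, iterations), number=1 and iterations=15 should yield each entry
# followed by a space. The same sequence, generated directly:
_expected = "".join(
    ("Fizz" * (n % 3 == 0) + "Buzz" * (n % 5 == 0) or str(n)) + " "
    for n in range(1, 16)
)
assert _expected.split() == [
    "1", "2", "Fizz", "4", "Buzz", "Fizz", "7", "8", "Fizz",
    "Buzz", "11", "Fizz", "13", "14", "FizzBuzz",
]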
| 17 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
def __init__( self :Optional[int] , _A :str , _A :Dict ) -> Any:
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A , scheduler=_A )
@torch.no_grad()
def __call__( self :List[str] , _A :int = 1 , _A :Optional[torch.Generator] = None , _A :int = 50 , _A :Optional[str] = "pil" , _A :bool = True , **_A :Tuple , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
__A = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=_A , )
__A = image.to(self.device )
# set step values
self.scheduler.set_timesteps(_A )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
__A = self.unet(_A , _A ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__A = self.scheduler.step(_A , _A , _A ).prev_sample
__A = (image / 2 + 0.5).clamp(0 , 1 )
__A = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__A = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
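# Editor's sketch of the final tensor-to-image conversion in the loop above:
# [-1, 1] floats are mapped to [0, 1], then NCHW becomes NHWC for numpy/PIL.
import torch

_x = torch.full((1, 3, 2, 2), -1.0)
_img = (_x / 2 + 0.5).clamp(0, 1).cpu().permute(0, 2, 3, 1).numpy()
assert _img.shape == (1, 2, 2, 3) and _img.min() == 0.0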
| 161 | 0 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class A__ ( _snake_case ):
def __init__( self , *args , **kwargs ) -> Any:
'''simple docstring'''
warnings.warn(
"""The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use YolosImageProcessor instead.""" , FutureWarning , )
super().__init__(*args , **kwargs )
| 354 |
'''simple docstring'''
from functools import lru_cache
@lru_cache
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
if num < 0:
raise ValueError("""Number should not be negative.""" )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
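# Editor's usage sketch, hedged: the recursive call above already targets the
# name `factorial`, so the definition only works when bound under that name.
# Restated cleanly, lru_cache memoises every intermediate value:
from functools import lru_cache

@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)

assert factorial(5) == 120 and factorial(0) == 1
assert factorial.cache_info().currsize == 6  # 0 through 5 all cached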
| 101 | 0 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def UpperCamelCase_( lowerCamelCase_ ) -> Tuple:
_lowercase : int = split_dict._to_yaml_list()
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
_lowercase : str = SplitDict._from_yaml_list(lowerCamelCase_ )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
_lowercase : int = None
# the split name of split_dict takes over the name of the split info object
_lowercase : Optional[Any] = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'split_info' , [SplitInfo(), SplitInfo(dataset_name=lowerCamelCase_ ), SplitInfo(dataset_name='my_dataset' )] )
def UpperCamelCase_( lowerCamelCase_ ) -> Union[str, Any]:
# For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
# field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
_lowercase : List[str] = asdict(SplitDict({'train': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 21 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : Union[str, Any] = ConsistencyModelPipeline
lowercase_ : Tuple = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowercase_ : List[str] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
lowercase_ : List[str] = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
@property
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Tuple = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test', subfolder='test_unet', )
return unet
@property
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Tuple = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test', subfolder='test_unet_class_cond', )
return unet
def UpperCamelCase ( self, lowerCamelCase=False) -> Dict:
"""simple docstring"""
if class_cond:
_lowercase : Union[str, Any] = self.dummy_cond_unet
else:
_lowercase : Union[str, Any] = self.dummy_uncond_unet
# Default to CM multistep sampler
_lowercase : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, )
_lowercase : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=0) -> Tuple:
"""simple docstring"""
if str(lowerCamelCase).startswith('mps'):
_lowercase : str = torch.manual_seed(lowerCamelCase)
else:
_lowercase : int = torch.Generator(device=lowerCamelCase).manual_seed(lowerCamelCase)
_lowercase : Tuple = {
'batch_size': 1,
'num_inference_steps': None,
'timesteps': [22, 0],
'generator': generator,
'output_type': 'np',
}
return inputs
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : Optional[int] = self.get_dummy_components()
_lowercase : str = ConsistencyModelPipeline(**lowerCamelCase)
_lowercase : Dict = pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Tuple = self.get_dummy_inputs(lowerCamelCase)
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
assert image.shape == (1, 32, 32, 3)
_lowercase : int = image[0, -3:, -3:, -1]
_lowercase : Dict = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : Dict = self.get_dummy_components(class_cond=lowerCamelCase)
_lowercase : Any = ConsistencyModelPipeline(**lowerCamelCase)
_lowercase : str = pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Any = self.get_dummy_inputs(lowerCamelCase)
_lowercase : Any = 0
_lowercase : List[str] = pipe(**lowerCamelCase).images
assert image.shape == (1, 32, 32, 3)
_lowercase : Any = image[0, -3:, -3:, -1]
_lowercase : Union[str, Any] = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : Any = self.get_dummy_components()
_lowercase : Optional[Any] = ConsistencyModelPipeline(**lowerCamelCase)
_lowercase : List[str] = pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[Any] = self.get_dummy_inputs(lowerCamelCase)
_lowercase : Union[str, Any] = 1
_lowercase : Tuple = None
_lowercase : Tuple = pipe(**lowerCamelCase).images
assert image.shape == (1, 32, 32, 3)
_lowercase : str = image[0, -3:, -3:, -1]
_lowercase : List[str] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : Dict = self.get_dummy_components(class_cond=lowerCamelCase)
_lowercase : Dict = ConsistencyModelPipeline(**lowerCamelCase)
_lowercase : Optional[Any] = pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Tuple = self.get_dummy_inputs(lowerCamelCase)
_lowercase : Tuple = 1
_lowercase : int = None
_lowercase : Tuple = 0
_lowercase : Dict = pipe(**lowerCamelCase).images
assert image.shape == (1, 32, 32, 3)
_lowercase : List[str] = image[0, -3:, -3:, -1]
_lowercase : Any = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
@slow
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self, lowerCamelCase=0, lowerCamelCase=False, lowerCamelCase="cpu", lowerCamelCase=torch.floataa, lowerCamelCase=(1, 3, 64, 64)) -> Optional[Any]:
"""simple docstring"""
_lowercase : List[Any] = torch.manual_seed(lowerCamelCase)
_lowercase : str = {
'num_inference_steps': None,
'timesteps': [22, 0],
'class_labels': 0,
'generator': generator,
'output_type': 'np',
}
if get_fixed_latents:
_lowercase : Optional[Any] = self.get_fixed_latents(seed=lowerCamelCase, device=lowerCamelCase, dtype=lowerCamelCase, shape=lowerCamelCase)
_lowercase : Tuple = latents
return inputs
def UpperCamelCase ( self, lowerCamelCase=0, lowerCamelCase="cpu", lowerCamelCase=torch.floataa, lowerCamelCase=(1, 3, 64, 64)) -> Any:
"""simple docstring"""
if type(lowerCamelCase) == str:
_lowercase : Union[str, Any] = torch.device(lowerCamelCase)
_lowercase : int = torch.Generator(device=lowerCamelCase).manual_seed(lowerCamelCase)
_lowercase : List[str] = randn_tensor(lowerCamelCase, generator=lowerCamelCase, device=lowerCamelCase, dtype=lowerCamelCase)
return latents
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Tuple = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
_lowercase : Optional[int] = CMStochasticIterativeScheduler(
num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, )
_lowercase : Any = ConsistencyModelPipeline(unet=lowerCamelCase, scheduler=lowerCamelCase)
pipe.to(torch_device=lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : str = self.get_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
assert image.shape == (1, 64, 64, 3)
_lowercase : str = image[0, -3:, -3:, -1]
_lowercase : Optional[Any] = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : List[str] = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
_lowercase : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, )
_lowercase : Union[str, Any] = ConsistencyModelPipeline(unet=lowerCamelCase, scheduler=lowerCamelCase)
pipe.to(torch_device=lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_inputs()
_lowercase : int = 1
_lowercase : Optional[Any] = None
_lowercase : str = pipe(**lowerCamelCase).images
assert image.shape == (1, 64, 64, 3)
_lowercase : List[Any] = image[0, -3:, -3:, -1]
_lowercase : List[str] = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
@require_torch_a
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : str = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
_lowercase : Optional[int] = CMStochasticIterativeScheduler(
num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, )
_lowercase : Optional[int] = ConsistencyModelPipeline(unet=lowerCamelCase, scheduler=lowerCamelCase)
pipe.to(torch_device=lowerCamelCase, torch_dtype=torch.floataa)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Any = self.get_inputs(get_fixed_latents=lowerCamelCase, device=lowerCamelCase)
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowerCamelCase, enable_math=lowerCamelCase, enable_mem_efficient=lowerCamelCase):
_lowercase : Dict = pipe(**lowerCamelCase).images
assert image.shape == (1, 64, 64, 3)
_lowercase : Any = image[0, -3:, -3:, -1]
_lowercase : Union[str, Any] = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
@require_torch_a
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Dict = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
_lowercase : Optional[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, )
_lowercase : int = ConsistencyModelPipeline(unet=lowerCamelCase, scheduler=lowerCamelCase)
pipe.to(torch_device=lowerCamelCase, torch_dtype=torch.floataa)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_inputs(get_fixed_latents=lowerCamelCase, device=lowerCamelCase)
_lowercase : int = 1
_lowercase : str = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowerCamelCase, enable_math=lowerCamelCase, enable_mem_efficient=lowerCamelCase):
_lowercase : Union[str, Any] = pipe(**lowerCamelCase).images
assert image.shape == (1, 64, 64, 3)
_lowercase : Any = image[0, -3:, -3:, -1]
_lowercase : int = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
| 21 | 1 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class _snake_case :
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
def __lowerCamelCase ( UpperCAmelCase_ : TreeNode | None ):
"""simple docstring"""
def is_valid_tree(UpperCAmelCase_ : TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(UpperCAmelCase_ ):
raise ValueError(
'''Each node should be type of TreeNode and data should be float.''' )
def is_binary_search_tree_recursive_check(
UpperCAmelCase_ : TreeNode | None , UpperCAmelCase_ : float , UpperCAmelCase_ : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , UpperCAmelCase_ , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , UpperCAmelCase_ )
)
return is_binary_search_tree_recursive_check(UpperCAmelCase_ , -float('''inf''' ) , float('''inf''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
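# Editor's sketch of the same bound-propagation check on plain nested tuples
# (data, left, right), independent of the obfuscated dataclass above:
def _is_bst(node, lo=float("-inf"), hi=float("inf")) -> bool:
    if node is None:
        return True
    data, left, right = node
    return lo < data < hi and _is_bst(left, lo, data) and _is_bst(right, data, hi)

assert _is_bst((2.0, (1.0, None, None), (3.0, None, None)))
assert not _is_bst((2.0, (3.0, None, None), None))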
| 281 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case : List[str] = logging.get_logger(__name__)
snake_case : Optional[Any] = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
snake_case : str = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
snake_case : List[Any] = {
'''vinai/phobert-base''': 2_56,
'''vinai/phobert-large''': 2_56,
}
def __lowerCamelCase ( UpperCAmelCase_ : List[str] ):
"""simple docstring"""
a :Union[str, Any] = set()
a :str = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
a :Optional[int] = char
a :Optional[int] = set(UpperCAmelCase_ )
return pairs
class _snake_case ( _snake_case ):
SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , **_lowerCamelCase , ):
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , **_lowerCamelCase , )
a :Optional[Any] = vocab_file
a :Optional[Any] = merges_file
a :Any = {}
a :Any = 0
a :int = 1
a :Union[str, Any] = 2
a :List[Any] = 3
self.add_from_file(_lowerCamelCase )
a :List[str] = {v: k for k, v in self.encoder.items()}
with open(_lowerCamelCase , encoding='''utf-8''' ) as merges_handle:
a :List[str] = merges_handle.read().split('''\n''' )[:-1]
a :Any = [tuple(merge.split()[:-1] ) for merge in merges]
a :str = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
a :str = {}
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a :Union[str, Any] = [self.cls_token_id]
a :Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCamelCase )) + [1]
return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1]
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ):
a :Optional[int] = [self.sep_token_id]
a :Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return len(self.encoder )
def SCREAMING_SNAKE_CASE__ ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
if token in self.cache:
return self.cache[token]
a :Optional[int] = tuple(_lowerCamelCase )
a :List[str] = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
a :Union[str, Any] = get_pairs(_lowerCamelCase )
if not pairs:
return token
while True:
a :Optional[Any] = min(_lowerCamelCase , key=lambda _lowerCamelCase : self.bpe_ranks.get(_lowerCamelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
a , a :Dict = bigram
a :Union[str, Any] = []
a :int = 0
while i < len(_lowerCamelCase ):
try:
a :Optional[Any] = word.index(_lowerCamelCase , _lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
a :Union[str, Any] = j
if word[i] == first and i < len(_lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
a :Union[str, Any] = tuple(_lowerCamelCase )
a :int = new_word
if len(_lowerCamelCase ) == 1:
break
else:
a :List[str] = get_pairs(_lowerCamelCase )
a :Union[str, Any] = '''@@ '''.join(_lowerCamelCase )
a :Dict = word[:-4]
a :Any = word
return word
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
a :Union[str, Any] = []
a :str = re.findall(R'''\S+\n?''' , _lowerCamelCase )
for token in words:
split_tokens.extend(list(self.bpe(_lowerCamelCase ).split(''' ''' ) ) )
return split_tokens
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
return self.encoder.get(_lowerCamelCase , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
return self.decoder.get(_lowerCamelCase , self.unk_token )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
a :Optional[int] = ''' '''.join(_lowerCamelCase ).replace('''@@ ''' , '''''' ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ):
if not os.path.isdir(_lowerCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
a :Tuple = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
a :Optional[int] = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ):
copyfile(self.vocab_file , _lowerCamelCase )
if os.path.abspath(self.merges_file ) != os.path.abspath(_lowerCamelCase ):
copyfile(self.merges_file , _lowerCamelCase )
return out_vocab_file, out_merge_file
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
if isinstance(_lowerCamelCase , _lowerCamelCase ):
try:
with open(_lowerCamelCase , '''r''' , encoding='''utf-8''' ) as fd:
self.add_from_file(_lowerCamelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
return
a :str = f.readlines()
for lineTmp in lines:
a :Tuple = lineTmp.strip()
a :int = line.rfind(''' ''' )
if idx == -1:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
a :Tuple = line[:idx]
a :Tuple = len(self.encoder )
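# Editor's sketch, hedged: `get_pairs` above just collects adjacent symbol
# bigrams, which are the only merge candidates BPE ever considers:
def _pairs(word):
    return {(a, b) for a, b in zip(word, word[1:])}

assert _pairs(("l", "o", "w</w>")) == {("l", "o"), ("o", "w</w>")}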
| 281 | 1 |