Dataset schema (one row pairs a code sample and a style context with a label):

    code                     string   lengths 82 to 54.1k
    code_codestyle           int64    0 to 699
    style_context            string   lengths 111 to 35.6k
    style_context_codestyle  int64    0 to 699
    label                    int64    0 to 1

The rows below are shown one field at a time.
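Before the rows, a minimal loading sketch for a dataset with this schema, using the `datasets` library. The repository id is a placeholder, since the dump does not name the dataset:

# Hypothetical loading example -- "user/code-style-pairs" is a placeholder id.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(len(row["code"]), row["code_codestyle"], row["label"])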
code:

"""RoFormer model configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        # fall back to hidden_size when no separate embedding size is given
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
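As a brief orientation (my addition, not part of the row): this configuration class is consumed by the RoFormer model classes in transformers. A minimal sketch:

# Build a small, randomly initialized RoFormer encoder from the config above.
from transformers import RoFormerConfig, RoFormerModel

config = RoFormerConfig(num_hidden_layers=2, rotary_value=True)
model = RoFormerModel(config)
print(model.config.max_position_embeddings)  # 1536 by default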
code_codestyle: 405

style_context:
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` on [a, b] by repeated interval halving.

    The interval must contain a sign change, or have a root at an endpoint.
    """
    start: float = a
    end: float = b
    if function(a) == 0:  # one of a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if neither endpoint is a root and both values have the same sign,
        # this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # stop once the bracket is ~1e-7 wide
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    """Example function: a cubic with one real root near x = 2.0946."""
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
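A short sanity check (my addition, using the names restored above): the cubic has a single real root near 2.0945515, which the 1e-7 stopping threshold should pin down well:

root = bisection(f, 1, 1000)
assert abs(root - 2.0945515) < 1e-5  # known root of x**3 - 2x - 5
assert abs(f(root)) < 1e-4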
style_context_codestyle: 27
label: 0

code:
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Check BPE tokenization against the toy vocabulary above."""
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
code_codestyle: 18

style_context:
import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase: '''simple docstring''' def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ): _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_input_mask _A = use_token_type_ids _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = scope def lowerCAmelCase__ ( self ): _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase__ ( self ): return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , ) def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _A = NystromformerModel(config=snake_case_ ) model.to(snake_case_ ) model.eval() _A = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ ) _A = model(snake_case_ , token_type_ids=snake_case_ ) _A = model(snake_case_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _A = NystromformerForMaskedLM(config=snake_case_ ) 
model.to(snake_case_ ) model.eval() _A = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _A = NystromformerForQuestionAnswering(config=snake_case_ ) model.to(snake_case_ ) model.eval() _A = model( snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _A = self.num_labels _A = NystromformerForSequenceClassification(snake_case_ ) model.to(snake_case_ ) model.eval() _A = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _A = self.num_labels _A = NystromformerForTokenClassification(config=snake_case_ ) model.to(snake_case_ ) model.eval() _A = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _A = self.num_choices _A = NystromformerForMultipleChoice(config=snake_case_ ) model.to(snake_case_ ) model.eval() _A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = model( snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase__ ( self ): _A = self.prepare_config_and_inputs() ( ( _A ), ( _A ), ( _A ), ( _A ), ( _A ), ( _A ), ( _A ), ) = config_and_inputs _A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCamelCase( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' __magic_name__ = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) __magic_name__ = ( { 'feature-extraction': NystromformerModel, 'fill-mask': NystromformerForMaskedLM, 'question-answering': NystromformerForQuestionAnswering, 'text-classification': NystromformerForSequenceClassification, 'token-classification': NystromformerForTokenClassification, 'zero-shot': NystromformerForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ = False __magic_name__ = False def lowerCAmelCase__ ( self ): _A = NystromformerModelTester(self ) _A = ConfigTester(self , config_class=snake_case_ , hidden_size=37 ) def lowerCAmelCase__ ( self 
): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case_ ) def lowerCAmelCase__ ( self ): _A = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _A = type self.model_tester.create_and_check_model(*snake_case_ ) def lowerCAmelCase__ ( self ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case_ ) def lowerCAmelCase__ ( self ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*snake_case_ ) def lowerCAmelCase__ ( self ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case_ ) def lowerCAmelCase__ ( self ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case_ ) def lowerCAmelCase__ ( self ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*snake_case_ ) @slow def lowerCAmelCase__ ( self ): for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = NystromformerModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) @require_torch class lowerCamelCase( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase__ ( self ): _A = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' ) _A = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): _A = model(snake_case_ )[0] _A = torch.Size((1, 6, 768) ) self.assertEqual(output.shape , snake_case_ ) _A = torch.tensor( [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case_ , atol=1E-4 ) ) @slow def lowerCAmelCase__ ( self ): _A = 'the [MASK] of Belgium is Brussels' _A = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' ) _A = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' ) _A = tokenizer(snake_case_ , return_tensors='pt' ) with torch.no_grad(): _A = model(encoding.input_ids ).logits _A = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(snake_case_ ) , 'capital' )
style_context_codestyle: 27
label: 0

code:
def trapezoidal_rule(boundary, steps):
    """Extended trapezoidal rule: integrate f over [boundary[0], boundary[1]]
    using `steps` panels of width h."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    # interior sample points a + h, a + 2h, ..., b - h
    x = a + h
    while x < (b - h / 2.0):  # h/2 tolerance absorbs floating-point drift
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = trapezoidal_rule(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
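An accuracy note (my addition, using the names restored above): with the defaults the program integrates x**2 over [0, 1] and prints roughly 0.335 against the exact 1/3. The composite trapezoidal rule's error is O(h**2), so doubling `steps` should roughly quarter the error:

exact = 1.0 / 3.0
for steps in (10.0, 20.0, 40.0):
    # errors shrink about 4x per doubling: ~1.7e-3, ~4.2e-4, ~1.0e-4
    print(steps, abs(trapezoidal_rule([0.0, 1.0], steps) - exact))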
code_codestyle: 202

style_context:
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
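A note on the pattern (my addition): `_LazyModule` defers importing the heavy submodules until one of their names is first accessed, so `import transformers` stays fast. A standalone sketch of the mechanism, not the transformers source:

import importlib
import types


class LazyModule(types.ModuleType):
    """Sketch: map public names to submodules and import a submodule
    only when one of its names is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(module, attr)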
style_context_codestyle: 27
label: 0

code:
'''simple docstring''' import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) __snake_case = logging.getLogger(__name__) def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Optional[Any]: lowercase_ = np.argmax(_SCREAMING_SNAKE_CASE , axis=1 ) return np.sum(outputs == labels ) def A_ ( SCREAMING_SNAKE_CASE_ ) ->List[Any]: with open(_SCREAMING_SNAKE_CASE , encoding="""utf_8""" ) as f: lowercase_ = csv.reader(_SCREAMING_SNAKE_CASE ) lowercase_ = [] next(_SCREAMING_SNAKE_CASE ) # skip the first line for line in tqdm(_SCREAMING_SNAKE_CASE ): output.append((""" """.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->List[str]: lowercase_ = [] for dataset in encoded_datasets: lowercase_ = len(_SCREAMING_SNAKE_CASE ) lowercase_ = np.zeros((n_batch, 2, input_len) , dtype=np.intaa ) lowercase_ = np.zeros((n_batch, 2) , dtype=np.intaa ) lowercase_ = np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.intaa ) lowercase_ = np.zeros((n_batch,) , dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(_SCREAMING_SNAKE_CASE ): lowercase_ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] lowercase_ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] lowercase_ = with_conta lowercase_ = with_conta lowercase_ = len(_SCREAMING_SNAKE_CASE ) - 1 lowercase_ = len(_SCREAMING_SNAKE_CASE ) - 1 lowercase_ = with_conta lowercase_ = with_conta lowercase_ = mc_label lowercase_ = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(_SCREAMING_SNAKE_CASE ) for t in all_inputs ) ) return tensor_datasets def A_ ( ) ->List[str]: lowercase_ = argparse.ArgumentParser() parser.add_argument("""--model_name""" , type=_SCREAMING_SNAKE_CASE , default="""openai-gpt""" , help="""pretrained model name""" ) parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" ) parser.add_argument("""--do_eval""" , action="""store_true""" , help="""Whether to run eval on the dev set.""" ) parser.add_argument( """--output_dir""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""The output directory where the model predictions and checkpoints will be written.""" , ) parser.add_argument("""--train_dataset""" , type=_SCREAMING_SNAKE_CASE , default="""""" ) parser.add_argument("""--eval_dataset""" , type=_SCREAMING_SNAKE_CASE , default="""""" ) parser.add_argument("""--seed""" , type=_SCREAMING_SNAKE_CASE , default=42 ) parser.add_argument("""--num_train_epochs""" , type=_SCREAMING_SNAKE_CASE , default=3 ) parser.add_argument("""--train_batch_size""" , type=_SCREAMING_SNAKE_CASE , default=8 ) parser.add_argument("""--eval_batch_size""" , type=_SCREAMING_SNAKE_CASE , default=16 ) parser.add_argument("""--adam_epsilon""" , default=1e-8 , type=_SCREAMING_SNAKE_CASE , help="""Epsilon for Adam 
optimizer.""" ) parser.add_argument("""--max_grad_norm""" , type=_SCREAMING_SNAKE_CASE , default=1 ) parser.add_argument( """--max_steps""" , default=-1 , type=_SCREAMING_SNAKE_CASE , help=( """If > 0: set total number of training steps to perform. Override num_train_epochs.""" ) , ) parser.add_argument( """--gradient_accumulation_steps""" , type=_SCREAMING_SNAKE_CASE , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , ) parser.add_argument("""--learning_rate""" , type=_SCREAMING_SNAKE_CASE , default=6.25e-5 ) parser.add_argument("""--warmup_steps""" , default=0 , type=_SCREAMING_SNAKE_CASE , help="""Linear warmup over warmup_steps.""" ) parser.add_argument("""--lr_schedule""" , type=_SCREAMING_SNAKE_CASE , default="""warmup_linear""" ) parser.add_argument("""--weight_decay""" , type=_SCREAMING_SNAKE_CASE , default=0.01 ) parser.add_argument("""--lm_coef""" , type=_SCREAMING_SNAKE_CASE , default=0.9 ) parser.add_argument("""--n_valid""" , type=_SCREAMING_SNAKE_CASE , default=3_74 ) parser.add_argument("""--server_ip""" , type=_SCREAMING_SNAKE_CASE , default="""""" , help="""Can be used for distant debugging.""" ) parser.add_argument("""--server_port""" , type=_SCREAMING_SNAKE_CASE , default="""""" , help="""Can be used for distant debugging.""" ) lowercase_ = parser.parse_args() print(_SCREAMING_SNAKE_CASE ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("""Waiting for debugger attach""" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_SCREAMING_SNAKE_CASE ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) lowercase_ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) lowercase_ = torch.cuda.device_count() logger.info("""device: {}, n_gpu {}""".format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) if not args.do_train and not args.do_eval: raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" ) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset lowercase_ = ["""_start_""", """_delimiter_""", """_classify_"""] lowercase_ = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(_SCREAMING_SNAKE_CASE ) lowercase_ = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) lowercase_ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(_SCREAMING_SNAKE_CASE ) ) model.to(_SCREAMING_SNAKE_CASE ) # Load and encode the datasets def tokenize_and_encode(SCREAMING_SNAKE_CASE_ ): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return obj return [tokenize_and_encode(_SCREAMING_SNAKE_CASE ) for o in obj] logger.info("""Encoding dataset...""" ) lowercase_ = load_rocstories_dataset(args.train_dataset ) lowercase_ = load_rocstories_dataset(args.eval_dataset ) lowercase_ = (train_dataset, eval_dataset) lowercase_ = tokenize_and_encode(_SCREAMING_SNAKE_CASE ) # Compute the max input length for the Transformer lowercase_ = model.config.n_positions // 2 - 
2 lowercase_ = max( len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) lowercase_ = min(_SCREAMING_SNAKE_CASE , model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders lowercase_ = pre_process_datasets(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ) lowercase_ , lowercase_ = tensor_datasets[0], tensor_datasets[1] lowercase_ = TensorDataset(*_SCREAMING_SNAKE_CASE ) lowercase_ = RandomSampler(_SCREAMING_SNAKE_CASE ) lowercase_ = DataLoader(_SCREAMING_SNAKE_CASE , sampler=_SCREAMING_SNAKE_CASE , batch_size=args.train_batch_size ) lowercase_ = TensorDataset(*_SCREAMING_SNAKE_CASE ) lowercase_ = SequentialSampler(_SCREAMING_SNAKE_CASE ) lowercase_ = DataLoader(_SCREAMING_SNAKE_CASE , sampler=_SCREAMING_SNAKE_CASE , batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: lowercase_ = args.max_steps lowercase_ = args.max_steps // (len(_SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps) + 1 else: lowercase_ = len(_SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps * args.num_train_epochs lowercase_ = list(model.named_parameters() ) lowercase_ = ["""bias""", """LayerNorm.bias""", """LayerNorm.weight"""] lowercase_ = [ { """params""": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], """weight_decay""": args.weight_decay, }, {"""params""": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], """weight_decay""": 0.0}, ] lowercase_ = AdamW(_SCREAMING_SNAKE_CASE , lr=args.learning_rate , eps=args.adam_epsilon ) lowercase_ = get_linear_schedule_with_warmup( _SCREAMING_SNAKE_CASE , num_warmup_steps=args.warmup_steps , num_training_steps=_SCREAMING_SNAKE_CASE ) if args.do_train: lowercase_ , lowercase_ , lowercase_ = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) , desc="""Epoch""" ): lowercase_ = 0 lowercase_ = 0 lowercase_ = tqdm(_SCREAMING_SNAKE_CASE , desc="""Training""" ) for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): lowercase_ = tuple(t.to(_SCREAMING_SNAKE_CASE ) for t in batch ) lowercase_ , lowercase_ , lowercase_ , lowercase_ = batch lowercase_ = model(_SCREAMING_SNAKE_CASE , mc_token_ids=_SCREAMING_SNAKE_CASE , lm_labels=_SCREAMING_SNAKE_CASE , mc_labels=_SCREAMING_SNAKE_CASE ) lowercase_ = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() lowercase_ = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 lowercase_ = """Training loss: {:.2e} lr: {:.2e}""".format(_SCREAMING_SNAKE_CASE , scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer lowercase_ = model.module if hasattr(_SCREAMING_SNAKE_CASE , """module""" ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` lowercase_ = os.path.join(args.output_dir , _SCREAMING_SNAKE_CASE ) lowercase_ = os.path.join(args.output_dir , _SCREAMING_SNAKE_CASE ) torch.save(model_to_save.state_dict() , _SCREAMING_SNAKE_CASE ) model_to_save.config.to_json_file(_SCREAMING_SNAKE_CASE ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned lowercase_ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) 
lowercase_ = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(_SCREAMING_SNAKE_CASE ) if args.do_eval: model.eval() lowercase_ , lowercase_ = 0, 0 lowercase_ , lowercase_ = 0, 0 for batch in tqdm(_SCREAMING_SNAKE_CASE , desc="""Evaluating""" ): lowercase_ = tuple(t.to(_SCREAMING_SNAKE_CASE ) for t in batch ) lowercase_ , lowercase_ , lowercase_ , lowercase_ = batch with torch.no_grad(): lowercase_ , lowercase_ , lowercase_ , lowercase_ = model( _SCREAMING_SNAKE_CASE , mc_token_ids=_SCREAMING_SNAKE_CASE , lm_labels=_SCREAMING_SNAKE_CASE , mc_labels=_SCREAMING_SNAKE_CASE ) lowercase_ = mc_logits.detach().cpu().numpy() lowercase_ = mc_labels.to("""cpu""" ).numpy() lowercase_ = accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 lowercase_ = eval_loss / nb_eval_steps lowercase_ = eval_accuracy / nb_eval_examples lowercase_ = tr_loss / nb_tr_steps if args.do_train else None lowercase_ = {"""eval_loss""": eval_loss, """eval_accuracy""": eval_accuracy, """train_loss""": train_loss} lowercase_ = os.path.join(args.output_dir , """eval_results.txt""" ) with open(_SCREAMING_SNAKE_CASE , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key in sorted(result.keys() ): logger.info(""" %s = %s""" , _SCREAMING_SNAKE_CASE , str(result[key] ) ) writer.write("""%s = %s\n""" % (key, str(result[key] )) ) if __name__ == "__main__": main()
code_codestyle: 451

style_context:
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed

        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
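For orientation (my addition): `require_version` checks an installed package against a pip-style requirement string and raises with an optional hint if it does not match. Assuming the helper name restored above, a call looks like:

# Looks up the pinned requirement for "tqdm" in the dependency table and
# raises if the installed version does not satisfy it.
dep_version_check("tqdm", hint="pip install tqdm --upgrade")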
style_context_codestyle: 27
label: 0

code:
"""simple docstring""" from ..utils import DummyObject, requires_backends class __snake_case ( metaclass=__snake_case ): """simple docstring""" lowerCAmelCase_ : int = ['torch', 'torchsde'] def __init__( self :Dict , *UpperCamelCase__ :Dict , **UpperCamelCase__ :List[str] ): requires_backends(self , ["torch", "torchsde"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls :Any , *UpperCamelCase__ :List[str] , **UpperCamelCase__ :Dict ): requires_backends(cls , ["torch", "torchsde"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls :str , *UpperCamelCase__ :Any , **UpperCamelCase__ :int ): requires_backends(cls , ["torch", "torchsde"] )
code_codestyle: 388

style_context:
def nand_gate(input_1: int, input_2: int) -> int:
    """Output 0 only when both inputs are 1; otherwise output 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
style_context_codestyle: 27
label: 0

code:
import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def _lowercase ( lowercase__ ): if ( (cp >= 0X4e_00 and cp <= 0X9f_ff) or (cp >= 0X34_00 and cp <= 0X4d_bf) # or (cp >= 0X2_00_00 and cp <= 0X2_a6_df) # or (cp >= 0X2_a7_00 and cp <= 0X2_b7_3f) # or (cp >= 0X2_b7_40 and cp <= 0X2_b8_1f) # or (cp >= 0X2_b8_20 and cp <= 0X2_ce_af) # or (cp >= 0Xf9_00 and cp <= 0Xfa_ff) or (cp >= 0X2_f8_00 and cp <= 0X2_fa_1f) # ): # return True return False def _lowercase ( lowercase__ ): for char in word: __lowerCAmelCase : List[Any] = ord(_SCREAMING_SNAKE_CASE ) if not _is_chinese_char(_SCREAMING_SNAKE_CASE ): return 0 return 1 def _lowercase ( lowercase__ ): __lowerCAmelCase : Union[str, Any] = set() for token in tokens: __lowerCAmelCase : str = len(_SCREAMING_SNAKE_CASE ) > 1 and is_chinese(_SCREAMING_SNAKE_CASE ) if chinese_word: word_set.add(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = list(_SCREAMING_SNAKE_CASE ) return word_list def _lowercase ( lowercase__ , lowercase__ ): if not chinese_word_set: return bert_tokens __lowerCAmelCase : Optional[int] = max([len(_SCREAMING_SNAKE_CASE ) for w in chinese_word_set] ) __lowerCAmelCase : Optional[Any] = bert_tokens __lowerCAmelCase, __lowerCAmelCase : Optional[int] = 0, len(_SCREAMING_SNAKE_CASE ) while start < end: __lowerCAmelCase : Optional[Any] = True if is_chinese(bert_word[start] ): __lowerCAmelCase : Dict = min(end - start , _SCREAMING_SNAKE_CASE ) for i in range(_SCREAMING_SNAKE_CASE , 1 , -1 ): __lowerCAmelCase : List[Any] = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): __lowerCAmelCase : int = '''##''' + bert_word[j] __lowerCAmelCase : int = start + i __lowerCAmelCase : Dict = False break if single_word: start += 1 return bert_word def _lowercase ( lowercase__ , lowercase__ , lowercase__ ): __lowerCAmelCase : List[Any] = [] for i in range(0 , len(_SCREAMING_SNAKE_CASE ) , 1_0_0 ): __lowerCAmelCase : List[str] = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=['''cws'''] ).cws __lowerCAmelCase : Optional[Any] = [get_chinese_word(_SCREAMING_SNAKE_CASE ) for r in res] ltp_res.extend(_SCREAMING_SNAKE_CASE ) assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = [] for i in range(0 , len(_SCREAMING_SNAKE_CASE ) , 1_0_0 ): __lowerCAmelCase : Optional[int] = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=5_1_2 ) bert_res.extend(res['''input_ids'''] ) assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = [] for input_ids, chinese_word in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : str = [] for id in input_ids: __lowerCAmelCase : Any = bert_tokenizer._convert_id_to_token(_SCREAMING_SNAKE_CASE ) input_tokens.append(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = add_sub_symbol(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_SCREAMING_SNAKE_CASE ): if token[:2] == "##": __lowerCAmelCase : Dict = token[2:] # save chinese tokens' pos if len(_SCREAMING_SNAKE_CASE ) == 1 and _is_chinese_char(ord(_SCREAMING_SNAKE_CASE ) ): ref_id.append(_SCREAMING_SNAKE_CASE ) ref_ids.append(_SCREAMING_SNAKE_CASE ) assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) return ref_ids def _lowercase ( lowercase__ ): with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f: __lowerCAmelCase : List[str] = f.readlines() __lowerCAmelCase : List[Any] = [line.strip() for line in data if len(_SCREAMING_SNAKE_CASE ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' __lowerCAmelCase : int = LTP(args.ltp ) # faster in GPU device __lowerCAmelCase : Optional[Any] = BertTokenizer.from_pretrained(args.bert ) __lowerCAmelCase : List[Any] = prepare_ref(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f: __lowerCAmelCase : List[str] = [json.dumps(_SCREAMING_SNAKE_CASE ) + '''\n''' for ref in ref_ids] f.writelines(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser(description="prepare_chinese_ref") parser.add_argument( "--file_name", required=False, type=str, default="./resources/chinese-demo.txt", help="file need process, same as training data in lm", ) parser.add_argument( "--ltp", required=False, type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path", ) parser.add_argument( "--bert", required=False, type=str, default="./resources/robert", help="resources for Bert tokenizer", ) parser.add_argument( "--save_path", required=False, type=str, default="./resources/ref.txt", help="path to save res", ) _UpperCamelCase = parser.parse_args() main(args)
code_codestyle: 492

style_context:
from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class lowerCamelCase: '''simple docstring''' def __init__( self , snake_case_ , ): _A = parent _A = 13 _A = 7 _A = True _A = True _A = True _A = 99 _A = 32 _A = 2 _A = 4 _A = 37 _A = 'gelu' _A = 0.1 _A = 0.1 _A = 512 _A = 16 _A = 2 _A = 0.02 _A = 3 _A = 4 _A = None def lowerCAmelCase__ ( self ): _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase__ ( self ): ( ( _A ), ( _A ), ( _A ), ( _A ), ( _A ), ( _A ), ) = self.prepare_config_and_inputs() _A = True _A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _A = TFEsmModel(config=snake_case_ ) _A = {'input_ids': input_ids, 'attention_mask': input_mask} _A = model(snake_case_ ) _A = [input_ids, input_mask] _A = model(snake_case_ ) _A = model(snake_case_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ): _A = True _A = TFEsmModel(config=snake_case_ ) _A = { 'input_ids': input_ids, 'attention_mask': input_mask, 'encoder_hidden_states': encoder_hidden_states, 'encoder_attention_mask': encoder_attention_mask, } _A = model(snake_case_ ) _A = [input_ids, input_mask] _A = model(snake_case_ , encoder_hidden_states=snake_case_ ) # Also check the case where encoder outputs are not passed _A = model(snake_case_ , attention_mask=snake_case_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , 
snake_case_ ): _A = TFEsmForMaskedLM(config=snake_case_ ) _A = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _A = self.num_labels _A = TFEsmForTokenClassification(config=snake_case_ ) _A = {'input_ids': input_ids, 'attention_mask': input_mask} _A = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase__ ( self ): _A = self.prepare_config_and_inputs() ( ( _A ), ( _A ), ( _A ), ( _A ), ( _A ), ( _A ), ) = config_and_inputs _A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class lowerCamelCase( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' __magic_name__ = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) __magic_name__ = ( { 'feature-extraction': TFEsmModel, 'fill-mask': TFEsmForMaskedLM, 'text-classification': TFEsmForSequenceClassification, 'token-classification': TFEsmForTokenClassification, 'zero-shot': TFEsmForSequenceClassification, } if is_tf_available() else {} ) __magic_name__ = False __magic_name__ = False def lowerCAmelCase__ ( self ): _A = TFEsmModelTester(self ) _A = ConfigTester(self , config_class=snake_case_ , hidden_size=37 ) def lowerCAmelCase__ ( self ): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case_ ) def lowerCAmelCase__ ( self ): _A = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*snake_case_ ) def lowerCAmelCase__ ( self ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case_ ) def lowerCAmelCase__ ( self ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*snake_case_ ) @slow def lowerCAmelCase__ ( self ): for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = TFEsmModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) @unittest.skip('Protein models do not support embedding resizing.' ) def lowerCAmelCase__ ( self ): pass @unittest.skip('Protein models do not support embedding resizing.' ) def lowerCAmelCase__ ( self ): pass def lowerCAmelCase__ ( self ): _A, _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(snake_case_ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer _A = model.get_bias() assert isinstance(snake_case_ , snake_case_ ) for k, v in name.items(): assert isinstance(snake_case_ , tf.Variable ) else: _A = model.get_output_embeddings() assert x is None _A = model.get_bias() assert name is None @require_tf class lowerCamelCase( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase__ ( self ): _A = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) _A = tf.constant([[0, 1, 2, 3, 4, 5]] ) _A = model(snake_case_ )[0] _A = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) , snake_case_ ) # compare the actual values for a slice. 
_A = tf.constant( [ [ [8.92_1518, -10.58_9814, -6.467_1307], [-6.396_7156, -13.91_1377, -1.121_1915], [-7.78_1247, -13.95_1557, -3.74_0592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) ) @slow def lowerCAmelCase__ ( self ): _A = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) _A = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) _A = model(snake_case_ )[0] # compare the actual values for a slice. _A = tf.constant( [ [ [0.1444_3092, 0.5412_5327, 0.324_7739], [0.3034_0484, 0.0052_6676, 0.3107_7722], [0.3227_8043, -0.2498_7096, 0.341_4628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
style_context_codestyle: 27
label: 0

code:
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number, with fibonacci(2) == 1."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number containing n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
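For reference (my addition): this is Project Euler problem 25, where the first Fibonacci term with 1000 digits is term 4782, so `solution()` returns 4782. A quick small-input check:

assert solution(3) == 12  # 144 is the first three-digit Fibonacci number
# assert solution(1000) == 4782  # correct but slow: fibonacci() rebuilds the
#                                # whole sequence on every call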
code_codestyle: 336

style_context:
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" _A = filter(lambda _SCREAMING_SNAKE_CASE : p.requires_grad , model.parameters() ) _A = sum([np.prod(p.size() ) for p in model_parameters] ) return params __A : Union[str, Any] = logging.getLogger(__name__) def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" if metric == "rouge2": _A = '{val_avg_rouge2:.4f}-{step_count}' elif metric == "bleu": _A = '{val_avg_bleu:.4f}-{step_count}' elif metric == "em": _A = '{val_avg_em:.4f}-{step_count}' elif metric == "loss": _A = '{val_avg_loss:.4f}-{step_count}' else: raise NotImplementedError( F"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this" ' function.' ) _A = ModelCheckpoint( dirpath=_SCREAMING_SNAKE_CASE , filename=_SCREAMING_SNAKE_CASE , monitor=F"val_{metric}" , mode='max' , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" return EarlyStopping( monitor=F"val_{metric}" , mode='min' if 'loss' in metric else 'max' , patience=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , ) class lowerCamelCase( pl.Callback ): '''simple docstring''' def lowerCAmelCase__ ( self , snake_case_ , snake_case_ ): _A = {F"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(snake_case_ ) @rank_zero_only def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=True ): logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****" ) _A = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} ) # Log results _A = Path(pl_module.hparams.output_dir ) if type_path == "test": _A = od / 'test_results.txt' _A = od / 'test_generations.txt' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
_A = od / F"{type_path}_results/{trainer.global_step:05d}.txt" _A = od / F"{type_path}_generations/{trainer.global_step:05d}.txt" results_file.parent.mkdir(exist_ok=snake_case_ ) generations_file.parent.mkdir(exist_ok=snake_case_ ) with open(snake_case_ , 'a+' ) as writer: for key in sorted(snake_case_ ): if key in ["log", "progress_bar", "preds"]: continue _A = metrics[key] if isinstance(snake_case_ , torch.Tensor ): _A = val.item() _A = F"{key}: {val:.6f}\n" writer.write(snake_case_ ) if not save_generations: return if "preds" in metrics: _A = '\n'.join(metrics['preds'] ) generations_file.open('w+' ).write(snake_case_ ) @rank_zero_only def lowerCAmelCase__ ( self , snake_case_ , snake_case_ ): try: _A = pl_module.model.model.num_parameters() except AttributeError: _A = pl_module.model.num_parameters() _A = count_trainable_parameters(snake_case_ ) # mp stands for million parameters trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} ) @rank_zero_only def lowerCAmelCase__ ( self , snake_case_ , snake_case_ ): save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(snake_case_ , snake_case_ , 'test' ) @rank_zero_only def lowerCAmelCase__ ( self , snake_case_ , snake_case_ ): save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
style_context_codestyle: 27
label: 0

code:
import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ : int = logging.get_logger(__name__) set_seed(770) UpperCamelCase__ : Dict = { "c_attn": "att_proj", "c_proj": "out_proj", "c_fc": "in_proj", "transformer.": "", "h.": "layers.", "ln_1": "layernorm_1", "ln_2": "layernorm_2", "ln_f": "layernorm_final", "wpe": "position_embeds_layer", "wte": "input_embeds_layer", } UpperCamelCase__ : str = { "text_small": { "repo_id": "suno/bark", "file_name": "text.pt", }, "coarse_small": { "repo_id": "suno/bark", "file_name": "coarse.pt", }, "fine_small": { "repo_id": "suno/bark", "file_name": "fine.pt", }, "text": { "repo_id": "suno/bark", "file_name": "text_2.pt", }, "coarse": { "repo_id": "suno/bark", "file_name": "coarse_2.pt", }, "fine": { "repo_id": "suno/bark", "file_name": "fine_2.pt", }, } UpperCamelCase__ : Optional[int] = os.path.dirname(os.path.abspath(__file__)) UpperCamelCase__ : int = os.path.join(os.path.expanduser("""~"""), """.cache""") UpperCamelCase__ : Union[str, Any] = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""") def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_=False ) -> Dict: """simple docstring""" a = model_type if use_small: key += "_small" return os.path.join(_SCREAMING_SNAKE_CASE, REMOTE_MODEL_PATHS[key]['''file_name'''] ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[Any]: """simple docstring""" os.makedirs(_SCREAMING_SNAKE_CASE, exist_ok=_SCREAMING_SNAKE_CASE ) hf_hub_download(repo_id=_SCREAMING_SNAKE_CASE, filename=_SCREAMING_SNAKE_CASE, local_dir=_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_=False, snake_case_="text" ) -> int: """simple docstring""" if model_type == "text": a = BarkSemanticModel a = BarkSemanticConfig a = BarkSemanticGenerationConfig elif model_type == "coarse": a = BarkCoarseModel a = BarkCoarseConfig a = BarkCoarseGenerationConfig elif model_type == "fine": a = BarkFineModel a = BarkFineConfig a = BarkFineGenerationConfig else: raise NotImplementedError() a = f"""{model_type}_small""" if use_small else model_type a = REMOTE_MODEL_PATHS[model_key] if not os.path.exists(_SCREAMING_SNAKE_CASE ): logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" ) _download(model_info['''repo_id'''], model_info['''file_name'''] ) a = torch.load(_SCREAMING_SNAKE_CASE, map_location=_SCREAMING_SNAKE_CASE ) # this is a hack a = checkpoint['''model_args'''] if "input_vocab_size" not in model_args: a = model_args['''vocab_size'''] a = model_args['''vocab_size'''] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments a = model_args.pop('''n_head''' ) a = model_args.pop('''n_embd''' ) a = model_args.pop('''n_layer''' ) a = ConfigClass(**checkpoint['''model_args'''] ) a = ModelClass(config=_SCREAMING_SNAKE_CASE ) a = GenerationConfigClass() a 
= model_generation_config a = checkpoint['''model'''] # fixup checkpoint a = '''_orig_mod.''' for k, v in list(state_dict.items() ): if k.startswith(_SCREAMING_SNAKE_CASE ): # replace part of the key with corresponding layer name in HF implementation a = k[len(_SCREAMING_SNAKE_CASE ) :] for old_layer_name in new_layer_name_dict: a = new_k.replace(_SCREAMING_SNAKE_CASE, new_layer_name_dict[old_layer_name] ) a = state_dict.pop(_SCREAMING_SNAKE_CASE ) a = set(state_dict.keys() ) - set(model.state_dict().keys() ) a = {k for k in extra_keys if not k.endswith('''.attn.bias''' )} a = set(model.state_dict().keys() ) - set(state_dict.keys() ) a = {k for k in missing_keys if not k.endswith('''.attn.bias''' )} if len(_SCREAMING_SNAKE_CASE ) != 0: raise ValueError(f"""extra keys found: {extra_keys}""" ) if len(_SCREAMING_SNAKE_CASE ) != 0: raise ValueError(f"""missing keys: {missing_keys}""" ) model.load_state_dict(_SCREAMING_SNAKE_CASE, strict=_SCREAMING_SNAKE_CASE ) a = model.num_parameters(exclude_embeddings=_SCREAMING_SNAKE_CASE ) a = checkpoint['''best_val_loss'''].item() logger.info(f"""model loaded: {round(n_params/1e6, 1 )}M params, {round(_SCREAMING_SNAKE_CASE, 3 )} loss""" ) model.eval() model.to(_SCREAMING_SNAKE_CASE ) del checkpoint, state_dict return model def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_=False, snake_case_="text" ) -> List[str]: """simple docstring""" if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() a = '''cpu''' # do conversion on cpu a = _get_ckpt_path(_SCREAMING_SNAKE_CASE, use_small=_SCREAMING_SNAKE_CASE ) a = _load_model(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, model_type=_SCREAMING_SNAKE_CASE, use_small=_SCREAMING_SNAKE_CASE ) # load bark initial model a = _bark_load_model(_SCREAMING_SNAKE_CASE, '''cpu''', model_type=_SCREAMING_SNAKE_CASE, use_small=_SCREAMING_SNAKE_CASE ) if model_type == "text": a = bark_model['''model'''] if model.num_parameters(exclude_embeddings=_SCREAMING_SNAKE_CASE ) != bark_model.get_num_params(): raise ValueError('''initial and new models don\'t have the same number of parameters''' ) # check if same output as the bark model a = 5 a = 1_0 if model_type in ["text", "coarse"]: a = torch.randint(2_5_6, (batch_size, sequence_length), dtype=torch.int ) a = bark_model(_SCREAMING_SNAKE_CASE )[0] a = model(_SCREAMING_SNAKE_CASE ) # take last logits a = output_new_model_total.logits[:, [-1], :] else: a = 3 a = 8 a = torch.randint(2_5_6, (batch_size, sequence_length, n_codes_total), dtype=torch.int ) a = model(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) a = bark_model(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) a = output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError('''initial and new outputs don\'t have the same shape''' ) if (output_new_model - output_old_model).abs().max().item() > 1e-3: raise ValueError('''initial and new outputs are not equal''' ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, ) -> Dict: """simple docstring""" a = os.path.join(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) a = BarkSemanticConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE, '''config.json''' ) ) a = BarkCoarseConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE, '''config.json''' ) ) a = 
BarkFineConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE, '''config.json''' ) ) a = EncodecConfig.from_pretrained('''facebook/encodec_24khz''' ) a = BarkSemanticModel.from_pretrained(_SCREAMING_SNAKE_CASE ) a = BarkCoarseModel.from_pretrained(_SCREAMING_SNAKE_CASE ) a = BarkFineModel.from_pretrained(_SCREAMING_SNAKE_CASE ) a = EncodecModel.from_pretrained('''facebook/encodec_24khz''' ) a = BarkConfig.from_sub_model_configs( _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) a = BarkGenerationConfig.from_sub_model_configs( semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config ) a = BarkModel(_SCREAMING_SNAKE_CASE ) a = semantic a = coarseAcoustic a = fineAcoustic a = codec a = bark_generation_config Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) bark.save_pretrained(_SCREAMING_SNAKE_CASE, repo_id=_SCREAMING_SNAKE_CASE, push_to_hub=_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCamelCase__ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""") parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""") UpperCamelCase__ : Optional[int] = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
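# A hedged usage sketch for the converter above, based only on the argparse
# arguments it registers (positional model_type and pytorch_dump_folder_path,
# plus --is_small); the script filename and output path are hypothetical:
#
#   python convert_suno_to_hf.py text ./bark-text-hf --is_small
#
# For model_type "text" with --is_small, this resolves the "text_small"
# checkpoint (downloading it into the cache if missing), converts it to a
# BarkSemanticModel, compares logits against the original suno/bark
# implementation, and saves the converted weights to ./bark-text-hf.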
387
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))
    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
    floyd_warshall(graph, v)
    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2
    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1
    #
    # Expected Output from the vertex, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
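# A non-interactive usage sketch for floyd_warshall above (O(v**3) time,
# O(v**2) extra space); the concrete matrix mirrors the commented example
# session at the end of the file. Note that floyd_warshall also prints the
# distance matrix as a side effect:
#
#   INF = float("inf")
#   example_graph = [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]
#   dist, _ = floyd_warshall(example_graph, 3)
#   assert dist[1][2] == 2.0 and dist[2][1] == 1.0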
27
0
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType __A : Optional[int] = logging.get_logger(__name__) __A : int = { "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json", "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json", "microsoft/deberta-v2-xlarge-mnli": ( "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json" ), "microsoft/deberta-v2-xxlarge-mnli": ( "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json" ), } class _a ( __snake_case): """simple docstring""" UpperCamelCase__ = """deberta-v2""" def __init__( self : Dict , __UpperCamelCase : Dict=1_2_8_1_0_0 , __UpperCamelCase : Any=1_5_3_6 , __UpperCamelCase : Optional[Any]=2_4 , __UpperCamelCase : int=2_4 , __UpperCamelCase : int=6_1_4_4 , __UpperCamelCase : Union[str, Any]="gelu" , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : List[Any]=5_1_2 , __UpperCamelCase : List[str]=0 , __UpperCamelCase : Dict=0.0_2 , __UpperCamelCase : Union[str, Any]=1e-7 , __UpperCamelCase : Tuple=False , __UpperCamelCase : Any=-1 , __UpperCamelCase : List[Any]=0 , __UpperCamelCase : Dict=True , __UpperCamelCase : str=None , __UpperCamelCase : List[Any]=0 , __UpperCamelCase : Dict="gelu" , **__UpperCamelCase : Optional[Any] , )->Optional[Any]: super().__init__(**snake_case_ ) _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = relative_attention _UpperCAmelCase = max_relative_positions _UpperCAmelCase = pad_token_id _UpperCAmelCase = position_biased_input # Backwards compatibility if type(snake_case_ ) == str: _UpperCAmelCase = [x.strip() for x in pos_att_type.lower().split('''|''' )] _UpperCAmelCase = pos_att_type _UpperCAmelCase = vocab_size _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = kwargs.get('''pooler_hidden_size''' , snake_case_ ) _UpperCAmelCase = pooler_dropout _UpperCAmelCase = pooler_hidden_act class _a ( __snake_case): """simple docstring""" @property def lowercase__ ( self : Any )->Tuple: if self.task == "multiple-choice": _UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _UpperCAmelCase = {0: '''batch''', 1: '''sequence'''} if self._config.type_vocab_size > 0: return OrderedDict( [('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] ) else: return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] ) @property def lowercase__ ( self : List[str] )->int: return 1_2 def lowercase__ ( self : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] = -1 , __UpperCamelCase : Union[str, Any] = -1 , __UpperCamelCase : Optional[Any] = -1 , __UpperCamelCase : Tuple = False , __UpperCamelCase : Any = None , __UpperCamelCase : Union[str, Any] = 3 , __UpperCamelCase : Tuple = 4_0 , __UpperCamelCase : List[str] = 
4_0 , __UpperCamelCase : List[str] = None , )->int: _UpperCAmelCase = super().generate_dummy_inputs(preprocessor=snake_case_ , framework=snake_case_ ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
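# The backwards-compatibility branch in the config above accepts
# `pos_att_type` as a "|"-separated string and lowercases/splits it; a
# standalone sketch of that parse (the values are DeBERTa's standard
# relative-attention types):
pos_att_type = "P2C|C2P"
if isinstance(pos_att_type, str):
    pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
assert pos_att_type == ["p2c", "c2p"]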
602
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import subprocess from packaging.version import Version, parse from accelerate.commands.config.config_args import default_config_file, load_config_from_file __A : Optional[int] = "Run commands across TPU VMs for initial setup before running `accelerate launch`." def __lowerCAmelCase( _SCREAMING_SNAKE_CASE=None ) -> str: """simple docstring""" if subparsers is not None: _A = subparsers.add_parser('tpu-config' , description=_description ) else: _A = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description ) # Core arguments _A = parser.add_argument_group( 'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' ) config_args.add_argument( '--config_file' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help='Path to the config file to use for accelerate.' , ) config_args.add_argument( '--tpu_name' , default=_SCREAMING_SNAKE_CASE , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , ) config_args.add_argument( '--tpu_zone' , default=_SCREAMING_SNAKE_CASE , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , ) _A = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' ) pod_args.add_argument( '--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , ) pod_args.add_argument( '--command_file' , default=_SCREAMING_SNAKE_CASE , help='The path to the file containing the commands to run on the pod on startup.' , ) pod_args.add_argument( '--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , ) pod_args.add_argument( '--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , ) pod_args.add_argument( '--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , ) pod_args.add_argument( '--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' ) if subparsers is not None: parser.set_defaults(func=_SCREAMING_SNAKE_CASE ) return parser def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" _A = None # Get the default from the config file if it exists. 
if args.config_file is not None or os.path.isfile(_SCREAMING_SNAKE_CASE ): _A = load_config_from_file(args.config_file ) if not args.command_file and defaults.command_file is not None and not args.command: _A = defaults.command_file if not args.command and defaults.commands is not None: _A = defaults.commands if not args.tpu_name: _A = defaults.tpu_name if not args.tpu_zone: _A = defaults.tpu_zone if args.accelerate_version == "dev": _A = 'git+https://github.com/huggingface/accelerate.git' elif args.accelerate_version == "latest": _A = 'accelerate -U' elif isinstance(parse(args.accelerate_version ) , _SCREAMING_SNAKE_CASE ): _A = F"accelerate=={args.accelerate_version}" if not args.command_file and not args.command: raise ValueError('You must specify either a command file or a command to run on the pod.' ) if args.command_file: with open(args.command_file , 'r' ) as f: _A = [f.read().splitlines()] # To turn list of lists into list of strings if isinstance(args.command[0] , _SCREAMING_SNAKE_CASE ): _A = [line for cmd in args.command for line in cmd] # Default to the shared folder and install accelerate _A = ['cd /usr/share'] if args.install_accelerate: new_cmd += [F"pip install {args.accelerate_version}"] new_cmd += args.command _A = '; '.join(_SCREAMING_SNAKE_CASE ) # Then send it to gcloud # Eventually try to use google-api-core to do this instead of subprocess _A = ['gcloud'] if args.use_alpha: cmd += ["alpha"] cmd += [ "compute", "tpus", "tpu-vm", "ssh", args.tpu_name, "--zone", args.tpu_zone, "--command", args.command, "--worker", "all", ] if args.debug: print(F"Running {' '.join(_SCREAMING_SNAKE_CASE )}" ) return subprocess.run(_SCREAMING_SNAKE_CASE ) print('Successfully setup pod.' ) def __lowerCAmelCase( ) -> Tuple: """simple docstring""" _A = tpu_command_parser() _A = parser.parse_args() tpu_command_launcher(_SCREAMING_SNAKE_CASE )
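# A hedged invocation sketch assembled from the flags registered above; the
# TPU name and zone are placeholder values:
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "pip install -r requirements.txt" --install_accelerate --debug
#
# With --debug set, the assembled `gcloud compute tpus tpu-vm ssh ...` command
# is printed instead of executed; without it, the commands are joined with ";"
# and run from /usr/share on every worker of the pod.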
27
0
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input UpperCAmelCase__ = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine" def _UpperCAmelCase ( ) -> Any: _snake_case = _ask_options( '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: _snake_case = get_sagemaker_input() else: _snake_case = get_cluster_input() return config def _UpperCAmelCase ( __lowerCamelCase : Optional[Any]=None ) -> Dict: if subparsers is not None: _snake_case = subparsers.add_parser('''config''' , description=_SCREAMING_SNAKE_CASE ) else: _snake_case = argparse.ArgumentParser('''Accelerate config command''' , description=_SCREAMING_SNAKE_CASE ) parser.add_argument( '''--config_file''' , default=_SCREAMING_SNAKE_CASE , help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , ) if subparsers is not None: parser.set_defaults(func=_SCREAMING_SNAKE_CASE ) return parser def _UpperCAmelCase ( __lowerCamelCase : int ) -> Optional[Any]: _snake_case = get_user_input() if args.config_file is not None: _snake_case = args.config_file else: if not os.path.isdir(_SCREAMING_SNAKE_CASE ): os.makedirs(_SCREAMING_SNAKE_CASE ) _snake_case = default_yaml_config_file if config_file.endswith('''.json''' ): config.to_json_file(_SCREAMING_SNAKE_CASE ) else: config.to_yaml_file(_SCREAMING_SNAKE_CASE ) print(f'''accelerate configuration saved at {config_file}''' ) def _UpperCAmelCase ( ) -> Union[str, Any]: _snake_case = config_command_parser() _snake_case = parser.parse_args() config_command(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
224
from ... import PretrainedConfig __A : Optional[Any] = { "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json", } class lowerCamelCase( __snake_case ): '''simple docstring''' __magic_name__ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP __magic_name__ = 'nezha' def __init__( self , snake_case_=2_1128 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3072 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=64 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-12 , snake_case_=0.1 , snake_case_=0 , snake_case_=2 , snake_case_=3 , snake_case_=True , **snake_case_ , ): super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ ) _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = hidden_act _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = max_relative_position _A = type_vocab_size _A = initializer_range _A = layer_norm_eps _A = classifier_dropout _A = use_cache
27
0
from ... import PretrainedConfig a_ = { "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json", } class _UpperCamelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP lowerCamelCase__ ='nezha' def __init__( self : Any , a : Union[str, Any]=2_1128 , a : Optional[Any]=768 , a : Any=12 , a : int=12 , a : Dict=3072 , a : List[str]="gelu" , a : List[str]=0.1 , a : Any=0.1 , a : Tuple=512 , a : int=64 , a : Optional[int]=2 , a : List[str]=0.02 , a : Optional[int]=1e-12 , a : int=0.1 , a : Optional[Any]=0 , a : List[Any]=2 , a : List[Any]=3 , a : str=True , **a : Optional[int] , ) -> str: """simple docstring""" super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ ) SCREAMING_SNAKE_CASE : Any = vocab_size SCREAMING_SNAKE_CASE : Optional[int] = hidden_size SCREAMING_SNAKE_CASE : Dict = num_hidden_layers SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads SCREAMING_SNAKE_CASE : List[str] = hidden_act SCREAMING_SNAKE_CASE : List[str] = intermediate_size SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings SCREAMING_SNAKE_CASE : str = max_relative_position SCREAMING_SNAKE_CASE : int = type_vocab_size SCREAMING_SNAKE_CASE : Optional[int] = initializer_range SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout SCREAMING_SNAKE_CASE : Tuple = use_cache
25
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """Count tile totals t <= t_limit that form between 1 and n_limit distinct
    hollow square laminae (an outer square with a centred square hole)."""
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # keep the hole the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
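# The tally above rests on the identity that a square lamina with outer side
# `outer` and a centred hole of side `hole` (same parity, hole >= 1) uses
# outer**2 - hole**2 tiles. A tiny brute-force cross-check of that bookkeeping
# (the helper name is illustrative, not from the file above):
def laminae_counts_brute(t_limit: int) -> dict:
    counts = {}
    for outer in range(3, t_limit):
        for hole in range(outer - 2, 0, -2):  # same parity as outer
            tiles = outer * outer - hole * hole
            if tiles > t_limit:
                break  # shrinking the hole only adds tiles
            counts[tiles] = counts.get(tiles, 0) + 1
    return counts

# 32 tiles admit exactly two laminae (9x9 around a 7x7 hole, 6x6 around 2x2):
assert laminae_counts_brute(100)[32] == 2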
27
0
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    row = len(possible_board)
    # If row equals the size of the board, there is a queen in every row of
    # the current board (possible_board)
    if row == n:
        # Convert possible_board, e.g. [1, 3, 0, 2], into board strings:
        # ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return
    # Iterate over each column in the row to find all possible placements
    for col in range(n):
        # First check that the column does not already appear in
        # possible_board, which would be a vertical collision. Then apply the
        # two diagonal formulas:
        #
        #   45 degrees:  row - col = b
        #   135 degrees: row + col = b
        #
        # and verify that neither value has been seen before (they are tracked
        # in diagonal_right_collisions and diagonal_left_collisions). If any
        # of these checks fail there is a collision, so skip to the next
        # column.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # Otherwise recurse with the updated board and collision sets
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")
    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
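# A concrete check of the two diagonal tests used in the search above: queens
# at (row=0, col=1) and (row=2, col=3) share a 45-degree diagonal because
# row - col matches, while (0, 3) and (2, 1) share a 135-degree diagonal
# because row + col matches:
assert 0 - 1 == 2 - 3  # same 45 degree diagonal
assert 0 + 3 == 2 + 1  # same 135 degree diagonal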
405
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori"
        )
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or "
            "equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests
    print("[DEMO] Areas of various geometric shapes: \n")
    print(f"Rectangle: {area_rectangle(10, 20) = }")
    print(f"Square: {area_square(10) = }")
    print(f"Triangle: {area_triangle(10, 10) = }")
    print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
    print(f"Parallelogram: {area_parallelogram(10, 20) = }")
    print(f"Rhombus: {area_rhombus(10, 20) = }")
    print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
    print(f"Circle: {area_circle(20) = }")
    print(f"Ellipse: {area_ellipse(10, 20) = }")
    print("\nSurface Areas of various geometric shapes: \n")
    print(f"Cube: {surface_area_cube(20) = }")
    print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
    print(f"Sphere: {surface_area_sphere(20) = }")
    print(f"Hemisphere: {surface_area_hemisphere(20) = }")
    print(f"Cone: {surface_area_cone(10, 20) = }")
    print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
    print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
    print(f"Torus: {surface_area_torus(20, 10) = }")
    print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
    print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Regular Pentagon: {area_reg_polygon(5, 10) = }")
27
0
'''simple docstring''' import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" ,"False" ) ) is not True ,reason="Skipping test because should only be run when releasing minor transformers version" ,) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "pytorch", "script": "run_ddp.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "tensorflow", "script": "run_tf_dist.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7}, }, ] ) class lowerCAmelCase_ ( unittest.TestCase ): def _snake_case ( self ) -> List[str]: if self.framework == "pytorch": subprocess.run( f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="utf-8" , check=snake_case_ , ) assert hasattr(self , "env" ) def _snake_case ( self , _lowerCAmelCase ) -> Optional[Any]: _lowerCAmelCase = f'''{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}''' # distributed data settings _lowerCAmelCase = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=snake_case_ , instance_count=snake_case_ , instance_type=self.instance_type , debugger_hook_config=snake_case_ , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=snake_case_ , py_version="py36" , ) def _snake_case ( self , _lowerCAmelCase ) -> Optional[Any]: TrainingJobAnalytics(snake_case_ ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' ) @parameterized.expand([(2,)] ) def _snake_case ( self , _lowerCAmelCase ) -> List[Any]: # create estimator _lowerCAmelCase = self.create_estimator(snake_case_ ) # run training estimator.fit() # result dataframe _lowerCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis _lowerCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] ) _lowerCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping _lowerCAmelCase = ( Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy ) assert all(t <= self.results["eval_loss"] for t in eval_loss ) # dump tests result into json file to share in PR with open(f'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile: 
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , snake_case_ )
18
import numpy as np


def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Hyperbolic tangent, computed via the identity tanh(x) = 2 / (1 + e**(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
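# Cross-check of the identity used above, tanh(x) = 2 / (1 + e**(-2x)) - 1,
# against numpy's built-in tanh (illustrative; assumes the function above is
# in scope):
sample = np.array([-1.0, 0.0, 1.0])
assert np.allclose(tangent_hyperbolic(sample), np.tanh(sample))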
27
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer lowerCAmelCase : int = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} lowerCAmelCase : Any = { "vocab_file": { "google/electra-small-generator": ( "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt" ), "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt", "google/electra-large-generator": ( "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt" ), "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt" ), }, "tokenizer_file": { "google/electra-small-generator": ( "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json" ), "google/electra-base-generator": ( "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json" ), "google/electra-large-generator": ( "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json" ), "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json" ), }, } lowerCAmelCase : Optional[int] = { "google/electra-small-generator": 512, "google/electra-base-generator": 512, "google/electra-large-generator": 512, "google/electra-small-discriminator": 512, "google/electra-base-discriminator": 512, "google/electra-large-discriminator": 512, } lowerCAmelCase : Dict = { "google/electra-small-generator": {"do_lower_case": True}, "google/electra-base-generator": {"do_lower_case": True}, "google/electra-large-generator": {"do_lower_case": True}, "google/electra-small-discriminator": {"do_lower_case": True}, "google/electra-base-discriminator": {"do_lower_case": True}, "google/electra-large-discriminator": {"do_lower_case": True}, } class a ( __snake_case ): SCREAMING_SNAKE_CASE__ : List[Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : Tuple = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : str = PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ : Union[str, Any] = ElectraTokenizer def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase="[UNK]" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[PAD]" , _lowerCAmelCase="[CLS]" , _lowerCAmelCase="[MASK]" , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase , ): """simple docstring""" super().__init__( snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , ) __SCREAMING_SNAKE_CASE: Optional[int] = 
json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , snake_case_ ) != do_lower_case or normalizer_state.get('''strip_accents''' , snake_case_ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , snake_case_ ) != tokenize_chinese_chars ): __SCREAMING_SNAKE_CASE: str = getattr(snake_case_ , normalizer_state.pop('''type''' ) ) __SCREAMING_SNAKE_CASE: List[str] = do_lower_case __SCREAMING_SNAKE_CASE: List[Any] = strip_accents __SCREAMING_SNAKE_CASE: Any = tokenize_chinese_chars __SCREAMING_SNAKE_CASE: int = normalizer_class(**snake_case_ ) __SCREAMING_SNAKE_CASE: Any = do_lower_case def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase=None ): """simple docstring""" __SCREAMING_SNAKE_CASE: Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase = None ): """simple docstring""" __SCREAMING_SNAKE_CASE: Dict = [self.sep_token_id] __SCREAMING_SNAKE_CASE: Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase = None ): """simple docstring""" __SCREAMING_SNAKE_CASE: Union[str, Any] = self._tokenizer.model.save(snake_case_ , name=snake_case_ ) return tuple(snake_case_ )
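# A hedged usage sketch: upstream, this fast (tokenizers-backed) class is
# exposed as ElectraTokenizerFast, so typical use looks like the following
# (checkpoint name taken from the pretrained maps above):
#
#   from transformers import ElectraTokenizerFast
#   tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   ids = tok("hello world")["input_ids"]  # [CLS] ... [SEP] ids, as built above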
202
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available __A : Optional[Any] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[int] = ["MLukeTokenizer"] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys __A : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
27
0
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL __snake_case = logging.get_logger(__name__) def A_ ( SCREAMING_SNAKE_CASE_ ) ->List[List[ImageInput]]: if isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(_SCREAMING_SNAKE_CASE ): return [[videos]] raise ValueError(f"""Could not make batched video from {videos}""" ) class _a ( __snake_case ): """simple docstring""" A_ = ['''pixel_values'''] def __init__( self : List[str] , lowercase_ : List[Any] = True , lowercase_ : Dict = None , lowercase_ : Optional[Any] = PILImageResampling.BILINEAR , lowercase_ : Optional[int] = True , lowercase_ : List[Any] = None , lowercase_ : Union[str, Any] = True , lowercase_ : Tuple = 1 / 255 , lowercase_ : Union[str, Any] = True , lowercase_ : List[Any] = True , lowercase_ : str = None , lowercase_ : Tuple = None , **lowercase_ : str , ): '''simple docstring''' super().__init__(**snake_case_ ) lowercase_ = size if size is not None else {"""shortest_edge""": 256} lowercase_ = get_size_dict(snake_case_ , default_to_square=snake_case_ ) lowercase_ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} lowercase_ = get_size_dict(snake_case_ , param_name="""crop_size""" ) lowercase_ = do_resize lowercase_ = size lowercase_ = do_center_crop lowercase_ = crop_size lowercase_ = resample lowercase_ = do_rescale lowercase_ = rescale_factor lowercase_ = offset lowercase_ = do_normalize lowercase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowercase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCamelCase__ ( self : Tuple , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : List[str] = PILImageResampling.BILINEAR , lowercase_ : int = None , **lowercase_ : Union[str, Any] , ): '''simple docstring''' lowercase_ = get_size_dict(snake_case_ , default_to_square=snake_case_ ) if "shortest_edge" in size: lowercase_ = get_resize_output_image_size(snake_case_ , size["""shortest_edge"""] , default_to_square=snake_case_ ) elif "height" in size and "width" in size: lowercase_ = (size["""height"""], size["""width"""]) else: raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" ) return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ ) def lowerCamelCase__ ( self : List[Any] , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : int = None , **lowercase_ : int , ): '''simple docstring''' lowercase_ = get_size_dict(snake_case_ ) if "height" not in size or "width" not in size: raise ValueError(F"""Size must have 'height' and 'width' as keys. 
Got {size.keys()}""" ) return center_crop(snake_case_ , size=(size["""height"""], size["""width"""]) , data_format=snake_case_ , **snake_case_ ) def lowerCamelCase__ ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] = True , lowercase_ : List[str] = None , **lowercase_ : str , ): '''simple docstring''' lowercase_ = image.astype(np.floataa ) if offset: lowercase_ = image - (scale / 2) return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ ) def lowerCamelCase__ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Any = None , **lowercase_ : Dict , ): '''simple docstring''' return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ ) def lowerCamelCase__ ( self : str , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] = None , lowercase_ : List[str] = None , lowercase_ : Union[str, Any] = None , lowercase_ : Dict = None , lowercase_ : Tuple = None , lowercase_ : str = None , lowercase_ : int = None , lowercase_ : int = None , lowercase_ : Optional[Any] = None , lowercase_ : Tuple = None , lowercase_ : Any = None , lowercase_ : Optional[Any] = ChannelDimension.FIRST , ): '''simple docstring''' if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) if offset and not do_rescale: raise ValueError("""For offset, do_rescale must also be set to True.""" ) # All transformations expect numpy arrays. 
lowercase_ = to_numpy_array(snake_case_ ) if do_resize: lowercase_ = self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ ) if do_center_crop: lowercase_ = self.center_crop(snake_case_ , size=snake_case_ ) if do_rescale: lowercase_ = self.rescale(image=snake_case_ , scale=snake_case_ , offset=snake_case_ ) if do_normalize: lowercase_ = self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ ) lowercase_ = to_channel_dimension_format(snake_case_ , snake_case_ ) return image def lowerCamelCase__ ( self : Dict , lowercase_ : Dict , lowercase_ : Any = None , lowercase_ : List[str] = None , lowercase_ : str = None , lowercase_ : str = None , lowercase_ : Dict = None , lowercase_ : Dict = None , lowercase_ : Optional[Any] = None , lowercase_ : List[Any] = None , lowercase_ : Tuple = None , lowercase_ : str = None , lowercase_ : Any = None , lowercase_ : Optional[Any] = None , lowercase_ : int = ChannelDimension.FIRST , **lowercase_ : Union[str, Any] , ): '''simple docstring''' lowercase_ = do_resize if do_resize is not None else self.do_resize lowercase_ = resample if resample is not None else self.resample lowercase_ = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase_ = do_rescale if do_rescale is not None else self.do_rescale lowercase_ = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase_ = offset if offset is not None else self.offset lowercase_ = do_normalize if do_normalize is not None else self.do_normalize lowercase_ = image_mean if image_mean is not None else self.image_mean lowercase_ = image_std if image_std is not None else self.image_std lowercase_ = size if size is not None else self.size lowercase_ = get_size_dict(snake_case_ , default_to_square=snake_case_ ) lowercase_ = crop_size if crop_size is not None else self.crop_size lowercase_ = get_size_dict(snake_case_ , param_name="""crop_size""" ) if not valid_images(snake_case_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) lowercase_ = make_batched(snake_case_ ) lowercase_ = [ [ self._preprocess_image( image=snake_case_ , do_resize=snake_case_ , size=snake_case_ , resample=snake_case_ , do_center_crop=snake_case_ , crop_size=snake_case_ , do_rescale=snake_case_ , rescale_factor=snake_case_ , offset=snake_case_ , do_normalize=snake_case_ , image_mean=snake_case_ , image_std=snake_case_ , data_format=snake_case_ , ) for img in video ] for video in videos ] lowercase_ = {"""pixel_values""": videos} return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
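# Note: the renaming above collapsed every method to `lowerCamelCase__`, so in
# this form later definitions shadow earlier ones; upstream these are distinct
# methods (resize, center_crop, rescale, normalize, _preprocess_image,
# preprocess). A hedged usage sketch against the upstream-style API — the
# defaults (shortest_edge 256, 224x224 crop, rescale with offset) match the
# ViViT video processor, but the class name here is an assumption:
#
#   import numpy as np
#   from transformers import VivitImageProcessor
#   processor = VivitImageProcessor()
#   video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
#   pixel_values = processor(video, return_tensors="np")["pixel_values"]
#   # expected shape: (1, num_frames, 3, 224, 224)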
451
import json import os from pathlib import Path import pytest from datasets.download.download_config import DownloadConfig from datasets.download.download_manager import DownloadManager from datasets.utils.file_utils import hash_url_to_filename __A : List[Any] = "http://www.mocksite.com/file1.txt" __A : List[Any] = "\"text\": [\"foo\", \"foo\"]" __A : Dict = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8" class lowerCamelCase: '''simple docstring''' __magic_name__ = 200 __magic_name__ = {'Content-Length': '100'} __magic_name__ = {} def lowerCAmelCase__ ( self , **snake_case_ ): return [bytes(snake_case_ , 'utf-8' )] def __lowerCAmelCase( *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" return MockResponse() @pytest.mark.parametrize('urls_type' , [str, list, dict] ) def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" import requests monkeypatch.setattr(_SCREAMING_SNAKE_CASE , 'request' , _SCREAMING_SNAKE_CASE ) _A = URL if issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _A = url elif issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _A = [url] elif issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _A = {'train': url} _A = 'dummy' _A = 'downloads' _A = tmp_path _A = DownloadConfig( cache_dir=os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , use_etag=_SCREAMING_SNAKE_CASE , ) _A = DownloadManager(dataset_name=_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE ) _A = dl_manager.download(_SCREAMING_SNAKE_CASE ) _A = urls for downloaded_paths in [downloaded_paths]: if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _A = [downloaded_paths] _A = [urls] elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): assert "train" in downloaded_paths.keys() _A = downloaded_paths.values() _A = urls.values() assert downloaded_paths for downloaded_path, input_url in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): assert downloaded_path == dl_manager.downloaded_paths[input_url] _A = Path(_SCREAMING_SNAKE_CASE ) _A = downloaded_path.parts assert parts[-1] == HASH assert parts[-2] == cache_subdir assert downloaded_path.exists() _A = downloaded_path.read_text() assert content == CONTENT _A = downloaded_path.with_suffix('.json' ) assert metadata_downloaded_path.exists() _A = json.loads(metadata_downloaded_path.read_text() ) assert metadata_content == {"url": URL, "etag": None} @pytest.mark.parametrize('paths_type' , [str, list, dict] ) def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" _A = str(_SCREAMING_SNAKE_CASE ) if issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _A = filename elif issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _A = [filename] elif issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _A = {'train': filename} _A = 'dummy' _A = xz_file.parent _A = 'extracted' _A = DownloadConfig( cache_dir=_SCREAMING_SNAKE_CASE , use_etag=_SCREAMING_SNAKE_CASE , ) _A = DownloadManager(dataset_name=_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE ) _A = dl_manager.extract(_SCREAMING_SNAKE_CASE ) _A = paths for extracted_paths in [extracted_paths]: if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _A = [extracted_paths] _A = [paths] elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): assert "train" in extracted_paths.keys() _A = 
extracted_paths.values() _A = paths.values() assert extracted_paths for extracted_path, input_path in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): assert extracted_path == dl_manager.extracted_paths[input_path] _A = Path(_SCREAMING_SNAKE_CASE ) _A = extracted_path.parts assert parts[-1] == hash_url_to_filename(_SCREAMING_SNAKE_CASE , etag=_SCREAMING_SNAKE_CASE ) assert parts[-2] == extracted_subdir assert extracted_path.exists() _A = extracted_path.read_text() _A = text_file.read_text() assert extracted_file_content == expected_file_content def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" assert path.endswith('.jsonl' ) for num_items, line in enumerate(_SCREAMING_SNAKE_CASE , start=1 ): _A = json.loads(line.decode('utf-8' ) ) assert item.keys() == {"col_1", "col_2", "col_3"} assert num_items == 4 @pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] ) def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" _A = request.getfixturevalue(_SCREAMING_SNAKE_CASE ) _A = DownloadManager() for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_SCREAMING_SNAKE_CASE ) , start=1 ): _test_jsonl(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert num_jsonl == 2 @pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] ) def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" _A = request.getfixturevalue(_SCREAMING_SNAKE_CASE ) _A = DownloadManager() for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_SCREAMING_SNAKE_CASE ) , start=1 ): for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_SCREAMING_SNAKE_CASE ) , start=1 ): _test_jsonl(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert num_tar == 1 assert num_jsonl == 2 def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" _A = DownloadManager() for num_file, file in enumerate(dl_manager.iter_files(_SCREAMING_SNAKE_CASE ) , start=1 ): assert os.path.basename(_SCREAMING_SNAKE_CASE ) == ("test.txt" if num_file == 1 else "train.txt") assert num_file == 2
27
0
"""simple docstring""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = { "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json", "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json", "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json", } class __snake_case ( __snake_case ): """simple docstring""" lowerCAmelCase_ : List[Any] = 't5' lowerCAmelCase_ : List[str] = ['past_key_values'] lowerCAmelCase_ : List[str] = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__( self :Union[str, Any] , UpperCamelCase__ :Tuple=32_128 , UpperCamelCase__ :Dict=512 , UpperCamelCase__ :Dict=64 , UpperCamelCase__ :Union[str, Any]=2_048 , UpperCamelCase__ :List[str]=6 , UpperCamelCase__ :Tuple=None , UpperCamelCase__ :List[str]=8 , UpperCamelCase__ :List[Any]=32 , UpperCamelCase__ :int=128 , UpperCamelCase__ :List[Any]=0.1 , UpperCamelCase__ :Optional[Any]=1E-6 , UpperCamelCase__ :Optional[Any]=1.0 , UpperCamelCase__ :Any="relu" , UpperCamelCase__ :Union[str, Any]=True , UpperCamelCase__ :Optional[int]=True , UpperCamelCase__ :Optional[Any]=0 , UpperCamelCase__ :Any=1 , **UpperCamelCase__ :Optional[int] , ): _a = vocab_size _a = d_model _a = d_kv _a = d_ff _a = num_layers _a = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry _a = num_heads _a = relative_attention_num_buckets _a = relative_attention_max_distance _a = dropout_rate _a = layer_norm_epsilon _a = initializer_factor _a = feed_forward_proj _a = use_cache _a = self.feed_forward_proj.split("-" ) _a = act_info[-1] _a = act_info[0] == "gated" if len(snake_case_ ) > 1 and act_info[0] != "gated" or len(snake_case_ ) > 2: raise ValueError( f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.' "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. " "\'gated-gelu\' or \'relu\'" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": _a = "gelu_new" super().__init__( pad_token_id=snake_case_ , eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , **snake_case_ , ) class __snake_case ( __snake_case ): """simple docstring""" @property def SCREAMING_SNAKE_CASE_ ( self :List[str] ): _a = { "input_ids": {0: "batch", 1: "encoder_sequence"}, "attention_mask": {0: "batch", 1: "encoder_sequence"}, } if self.use_past: _a = "past_encoder_sequence + sequence" _a = {0: "batch"} _a = {0: "batch", 1: "past_decoder_sequence + sequence"} else: _a = {0: "batch", 1: "decoder_sequence"} _a = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(snake_case_ , direction="inputs" ) return common_inputs @property def SCREAMING_SNAKE_CASE_ ( self :Dict ): return 13
388
from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> bool: """simple docstring""" _A = int(number**0.5 ) return number == sq * sq def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> tuple[int, int]: """simple docstring""" _A = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den _A = x_den * y_den * z_den _A = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) top //= hcf bottom //= hcf return top, bottom def __lowerCAmelCase( _SCREAMING_SNAKE_CASE = 35 ) -> int: """simple docstring""" _A = set() _A = 42 _A = Fraction(0 ) _A = 42 for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 _A = x_num * y_den + x_den * y_num _A = x_den * y_den _A = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _A = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=2 _A = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) _A = x_den * x_den * y_den * y_den if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ): _A = int(sqrt(_SCREAMING_SNAKE_CASE ) ) _A = int(sqrt(_SCREAMING_SNAKE_CASE ) ) _A = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _A = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=-1 _A = x_num * y_num _A = x_den * y_num + x_num * y_den _A = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _A = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=2 _A = x_num * x_num * y_num * y_num _A = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ): _A = int(sqrt(_SCREAMING_SNAKE_CASE ) ) _A = int(sqrt(_SCREAMING_SNAKE_CASE ) ) _A = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _A = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) for num, den in unique_s: total += Fraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return total.denominator + total.numerator if __name__ == "__main__": print(f"{solution() = }")
style_context_codestyle: 27
label: 0
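A minimal sketch (illustration only, not part of the dataset) of how the T5 config code earlier in this row resolves its activation settings from `feed_forward_proj`; the helper name `parse_feed_forward_proj` is ours:

def parse_feed_forward_proj(feed_forward_proj: str) -> tuple[str, bool]:
    # Mirrors the split/validation logic in the config row above.
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == "gated"
    if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
        raise ValueError(
            f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function"
        )
    if feed_forward_proj == "gated-gelu":  # backwards-compatibility alias
        dense_act_fn = "gelu_new"
    return dense_act_fn, is_gated_act


assert parse_feed_forward_proj("relu") == ("relu", False)
assert parse_feed_forward_proj("gated-gelu") == ("gelu_new", True)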
from __future__ import annotations import time import numpy as np _UpperCamelCase = [8, 5, 9, 7] _UpperCamelCase = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] _UpperCamelCase = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class __lowercase : def __init__( self , A_ , A_ , A_ , ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : Tuple = claim_vector __lowerCAmelCase : List[str] = allocated_resources_table __lowerCAmelCase : Optional[Any] = maximum_claim_table def UpperCamelCase__ ( self ) ->List[Any]: '''simple docstring''' return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(snake_case_ ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' return {self.__need().index(snake_case_ ): i for i in self.__need()} def UpperCamelCase__ ( self , **A_ ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : str = self.__need() __lowerCAmelCase : Tuple = self.__allocated_resources_table __lowerCAmelCase : str = self.__available_resources() __lowerCAmelCase : List[str] = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print('''_''' * 50 + '''\n''' ) while need_list: __lowerCAmelCase : List[str] = False for each_need in need_list: __lowerCAmelCase : Tuple = True for index, need in enumerate(snake_case_ ): if need > available_resources[index]: __lowerCAmelCase : str = False break if execution: __lowerCAmelCase : List[str] = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: __lowerCAmelCase : List[Any] = original_need_index print(f"""Process {process_number + 1} is executing.""" ) # remove the process run from stack need_list.remove(snake_case_ ) # update available/freed resources stack __lowerCAmelCase : Tuple = np.array(snake_case_ ) + np.array( alloc_resources_table[process_number] ) print( '''Updated available resource stack for processes: ''' + ''' '''.join([str(snake_case_ ) for x in available_resources] ) ) break if safe: print('''The process is in a safe state.\n''' ) else: print('''System in unsafe state. Aborting...\n''' ) break def UpperCamelCase__ ( self ) ->str: '''simple docstring''' print(''' ''' * 9 + '''Allocated Resource Table''' ) for item in self.__allocated_resources_table: print( f"""P{self.__allocated_resources_table.index(snake_case_ ) + 1}""" + ''' '''.join(f"""{it:>8}""" for it in item ) + '''\n''' ) print(''' ''' * 9 + '''System Resource Table''' ) for item in self.__maximum_claim_table: print( f"""P{self.__maximum_claim_table.index(snake_case_ ) + 1}""" + ''' '''.join(f"""{it:>8}""" for it in item ) + '''\n''' ) print( '''Current Usage by Active Processes: ''' + ''' '''.join(str(snake_case_ ) for x in self.__claim_vector ) ) print( '''Initial Available Resources: ''' + ''' '''.join(str(snake_case_ ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 492
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including num (sieve of Eratosthenes)."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start to False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
style_context_codestyle: 27
label: 0
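A worked check (illustration only, not dataset content) on the Banker's-algorithm code earlier in this row: the "need" matrix is simply the element-wise difference between the maximum claim table and the allocation table. Using the first two processes from the tables defined in that row:

import numpy as np

allocated = np.array([[2, 0, 1, 1], [0, 1, 2, 1]])  # from allocated_resources_table
maximum = np.array([[3, 2, 1, 4], [0, 2, 5, 2]])    # from maximum_claim_table
need = maximum - allocated                           # what each process may still request
print(need.tolist())  # [[1, 2, 0, 3], [0, 1, 3, 1]]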
def triangle_number_generator():
    """Yield the triangle numbers 1, 3, 6, 10, ..."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count the divisors of n via its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
code_codestyle: 336
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized infix expression with two stacks."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
style_context_codestyle: 27
label: 0
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__) UpperCamelCase__ : List[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase__ : Tuple = { "vocab_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } UpperCamelCase__ : List[str] = { "vocab_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } UpperCamelCase__ : List[Any] = { "vocab_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json" ), }, } UpperCamelCase__ : Optional[Any] = { "facebook/dpr-ctx_encoder-single-nq-base": 512, "facebook/dpr-ctx_encoder-multiset-base": 512, } UpperCamelCase__ : Any = { "facebook/dpr-question_encoder-single-nq-base": 512, "facebook/dpr-question_encoder-multiset-base": 512, } UpperCamelCase__ : int = { "facebook/dpr-reader-single-nq-base": 512, "facebook/dpr-reader-multiset-base": 512, } UpperCamelCase__ : Union[str, Any] = { "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True}, } UpperCamelCase__ : List[Any] = { "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True}, } UpperCamelCase__ : List[Any] = { "facebook/dpr-reader-single-nq-base": {"do_lower_case": True}, "facebook/dpr-reader-multiset-base": {"do_lower_case": True}, } class lowerCamelCase_ ( __snake_case ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = 
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE_ = DPRContextEncoderTokenizer class lowerCamelCase_ ( __snake_case ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE_ = DPRQuestionEncoderTokenizer UpperCamelCase__ : List[str] = collections.namedtuple( """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""] ) UpperCamelCase__ : Optional[int] = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""]) UpperCamelCase__ : Union[str, Any] = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. 
This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n " @add_start_docstrings(__snake_case ) class lowerCamelCase_ : def __call__( self : Union[str, Any] ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : Dict = None ,__lowerCamelCase : Union[str, Any] = None ,__lowerCamelCase : Optional[Any] = False ,__lowerCamelCase : Dict = False ,__lowerCamelCase : Optional[int] = None ,__lowerCamelCase : List[str] = None ,__lowerCamelCase : Any = None ,**__lowerCamelCase : Any ,): '''simple docstring''' if titles is None and texts is None: return super().__call__( snake_case_ ,padding=snake_case_ ,truncation=snake_case_ ,max_length=snake_case_ ,return_tensors=snake_case_ ,return_attention_mask=snake_case_ ,**snake_case_ ,) elif titles is None or texts is None: a = titles if texts is None else texts return super().__call__( snake_case_ ,snake_case_ ,padding=snake_case_ ,truncation=snake_case_ ,max_length=snake_case_ ,return_tensors=snake_case_ ,return_attention_mask=snake_case_ ,**snake_case_ ,) a = titles if not isinstance(snake_case_ ,snake_case_ ) else [titles] a = texts if not isinstance(snake_case_ ,snake_case_ ) else [texts] a = len(snake_case_ ) a = questions if not isinstance(snake_case_ ,snake_case_ ) else [questions] * n_passages assert len(snake_case_ ) == len( snake_case_ ), F"""There should be as many titles than texts but got {len(snake_case_ )} titles and {len(snake_case_ )} texts.""" a = super().__call__(snake_case_ ,snake_case_ ,padding=snake_case_ ,truncation=snake_case_ )['''input_ids'''] a = super().__call__(snake_case_ ,add_special_tokens=snake_case_ ,padding=snake_case_ ,truncation=snake_case_ )['''input_ids'''] a = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(snake_case_ ,snake_case_ ) ] } if return_attention_mask is not False: a = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) a = 
attention_mask return self.pad(snake_case_ ,padding=snake_case_ ,max_length=snake_case_ ,return_tensors=snake_case_ ) def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : Tuple ,__lowerCamelCase : List[str] ,__lowerCamelCase : Optional[Any] = 16 ,__lowerCamelCase : Optional[Any] = 64 ,__lowerCamelCase : Optional[Any] = 4 ,): '''simple docstring''' a = reader_input['''input_ids'''] a , a , a = reader_output[:3] a = len(snake_case_ ) a = sorted(range(snake_case_ ) ,reverse=snake_case_ ,key=relevance_logits.__getitem__ ) a = [] for doc_id in sorted_docs: a = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence a = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: a = sequence_ids.index(self.pad_token_id ) else: a = len(snake_case_ ) a = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=snake_case_ ,top_spans=snake_case_ ,) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=snake_case_ ,start_index=snake_case_ ,end_index=snake_case_ ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) ) if len(snake_case_ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : Union[str, Any] ,__lowerCamelCase : str ,__lowerCamelCase : Union[str, Any] ,__lowerCamelCase : Dict ,): '''simple docstring''' a = [] for start_index, start_score in enumerate(snake_case_ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) a = sorted(snake_case_ ,key=lambda __lowerCamelCase : x[1] ,reverse=snake_case_ ) a = [] for (start_index, end_index), score in scores: assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]""" a = end_index - start_index + 1 assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}""" if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(snake_case_ ) == top_spans: break return chosen_span_intervals @add_end_docstrings(__snake_case ) class lowerCamelCase_ ( __snake_case , __snake_case ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = READER_PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = READER_PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask'] SCREAMING_SNAKE_CASE_ = DPRReaderTokenizer
code_codestyle: 387
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowerCamelCase( unittest.TestCase ): '''simple docstring''' @property def lowerCAmelCase__ ( self ): torch.manual_seed(0 ) _A = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , ) return model def lowerCAmelCase__ ( self ): _A = self.dummy_uncond_unet _A = KarrasVeScheduler() _A = KarrasVePipeline(unet=snake_case_ , scheduler=snake_case_ ) pipe.to(snake_case_ ) pipe.set_progress_bar_config(disable=snake_case_ ) _A = torch.manual_seed(0 ) _A = pipe(num_inference_steps=2 , generator=snake_case_ , output_type='numpy' ).images _A = torch.manual_seed(0 ) _A = pipe(num_inference_steps=2 , generator=snake_case_ , output_type='numpy' , return_dict=snake_case_ )[0] _A = image[0, -3:, -3:, -1] _A = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _A = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class lowerCamelCase( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase__ ( self ): _A = 'google/ncsnpp-celebahq-256' _A = UNetaDModel.from_pretrained(snake_case_ ) _A = KarrasVeScheduler() _A = KarrasVePipeline(unet=snake_case_ , scheduler=snake_case_ ) pipe.to(snake_case_ ) pipe.set_progress_bar_config(disable=snake_case_ ) _A = torch.manual_seed(0 ) _A = pipe(num_inference_steps=20 , generator=snake_case_ , output_type='numpy' ).images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) _A = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
style_context_codestyle: 27
label: 0
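A hedged usage sketch for the DPR reader tokenizer defined and documented earlier in this row; the checkpoint name comes from the row's own pretrained maps, while the example strings and printed shape are our assumptions:

from transformers import DPRReaderTokenizer

# Loading requires network access; checkpoint name taken from the row above.
tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded = tokenizer(
    questions=["What does DPR stand for?"],
    titles=["Dense Passage Retrieval"],
    texts=["Dense Passage Retrieval (DPR) is a method for open-domain QA."],
    padding=True,
    return_tensors="pt",
)
# Per the docstring in the row, input_ids has shape (n_passages, sequence_length):
# [CLS] <question ids> [SEP] <title ids> [SEP] <text ids>
print(encoded["input_ids"].shape)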
"""simple docstring""" # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class _a ( __snake_case , __snake_case): """simple docstring""" UpperCamelCase__ = 1 @register_to_config def __init__( self : Dict , __UpperCamelCase : Tuple=2_0_0_0 , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : str=2_0 , __UpperCamelCase : Dict=1e-3 )->Union[str, Any]: _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None def lowercase__ ( self : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Any = None )->List[str]: _UpperCAmelCase = torch.linspace(1 , self.config.sampling_eps , snake_case_ , device=snake_case_ ) def lowercase__ ( self : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict=None )->Optional[Any]: if self.timesteps is None: raise ValueError( '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' ) # TODO(Patrick) better comments + non-PyTorch # postprocess model score _UpperCAmelCase = ( -0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) _UpperCAmelCase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) ) _UpperCAmelCase = std.flatten() while len(std.shape ) < len(score.shape ): _UpperCAmelCase = std.unsqueeze(-1 ) _UpperCAmelCase = -score / std # compute _UpperCAmelCase = -1.0 / len(self.timesteps ) _UpperCAmelCase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) _UpperCAmelCase = beta_t.flatten() while len(beta_t.shape ) < len(x.shape ): _UpperCAmelCase = beta_t.unsqueeze(-1 ) _UpperCAmelCase = -0.5 * beta_t * x _UpperCAmelCase = torch.sqrt(snake_case_ ) _UpperCAmelCase = drift - diffusion**2 * score _UpperCAmelCase = x + drift * dt # add noise _UpperCAmelCase = randn_tensor(x.shape , layout=x.layout , generator=snake_case_ , device=x.device , dtype=x.dtype ) _UpperCAmelCase = x_mean + diffusion * math.sqrt(-dt ) * noise return x, x_mean def __len__( self : Union[str, Any] )->Optional[int]: return self.config.num_train_timesteps
code_codestyle: 602
import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset __A : str = random.Random() def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: """simple docstring""" if rng is None: _A = global_rng _A = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class lowerCamelCase( unittest.TestCase ): '''simple docstring''' def __init__( self , snake_case_ , snake_case_=7 , snake_case_=400 , snake_case_=2000 , snake_case_=2048 , snake_case_=128 , snake_case_=1 , snake_case_=512 , snake_case_=30 , snake_case_=4_4100 , ): _A = parent _A = batch_size _A = min_seq_length _A = max_seq_length _A = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _A = spectrogram_length _A = feature_size _A = num_audio_channels _A = hop_length _A = chunk_length _A = sampling_rate def lowerCAmelCase__ ( self ): return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def lowerCAmelCase__ ( self , snake_case_=False , snake_case_=False ): def _flatten(snake_case_ ): return list(itertools.chain(*snake_case_ ) ) if equal_length: _A = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _A = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _A = [np.asarray(snake_case_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class lowerCamelCase( __snake_case , unittest.TestCase ): '''simple docstring''' __magic_name__ = TvltFeatureExtractor def lowerCAmelCase__ ( self ): _A = TvltFeatureExtractionTester(self ) def lowerCAmelCase__ ( self ): _A = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(snake_case_ , 'spectrogram_length' ) ) self.assertTrue(hasattr(snake_case_ , 'feature_size' ) ) self.assertTrue(hasattr(snake_case_ , 'num_audio_channels' ) ) self.assertTrue(hasattr(snake_case_ , 'hop_length' ) ) self.assertTrue(hasattr(snake_case_ , 'chunk_length' ) ) self.assertTrue(hasattr(snake_case_ , 'sampling_rate' ) ) def lowerCAmelCase__ ( self ): _A = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _A = feat_extract_first.save_pretrained(snake_case_ )[0] check_json_file_has_correct_format(snake_case_ ) _A = self.feature_extraction_class.from_pretrained(snake_case_ ) _A = feat_extract_first.to_dict() _A = feat_extract_second.to_dict() _A = dict_first.pop('mel_filters' ) _A = dict_second.pop('mel_filters' ) self.assertTrue(np.allclose(snake_case_ , snake_case_ ) ) self.assertEqual(snake_case_ , snake_case_ ) def lowerCAmelCase__ ( self ): _A = self.feature_extraction_class(**self.feat_extract_dict ) with 
tempfile.TemporaryDirectory() as tmpdirname: _A = os.path.join(snake_case_ , 'feat_extract.json' ) feat_extract_first.to_json_file(snake_case_ ) _A = self.feature_extraction_class.from_json_file(snake_case_ ) _A = feat_extract_first.to_dict() _A = feat_extract_second.to_dict() _A = dict_first.pop('mel_filters' ) _A = dict_second.pop('mel_filters' ) self.assertTrue(np.allclose(snake_case_ , snake_case_ ) ) self.assertEqual(snake_case_ , snake_case_ ) def lowerCAmelCase__ ( self ): # Initialize feature_extractor _A = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 _A = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _A = [np.asarray(snake_case_ ) for speech_input in speech_inputs] # Test not batched input _A = feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched _A = feature_extractor(snake_case_ , return_tensors='np' , sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking _A = feature_extractor( snake_case_ , return_tensors='np' , sampling_rate=4_4100 , mask_audio=snake_case_ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. _A = [floats_list((1, x) )[0] for x in (800, 800, 800)] _A = np.asarray(snake_case_ ) _A = feature_extractor(snake_case_ , return_tensors='np' , sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def lowerCAmelCase__ ( self , snake_case_ ): _A = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' ) # automatic decoding with librispeech _A = ds.sort('id' ).select(range(snake_case_ ) )[:num_samples]['audio'] return [x["array"] for x in speech_samples] def lowerCAmelCase__ ( self ): _A = self._load_datasamples(1 ) _A = TvltFeatureExtractor() _A = feature_extractor(snake_case_ , return_tensors='pt' ).audio_values self.assertEquals(audio_values.shape , (1, 1, 192, 128) ) _A = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , snake_case_ , atol=1E-4 ) )
style_context_codestyle: 27
label: 0
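Written out as math (our reading of the scheduler code earlier in this row, which follows the standard variance-preserving SDE algebra; not dataset content), the log-mean coefficient, noise scale, and beta schedule it computes are:

\log \bar{\alpha}(t) = -\tfrac{1}{4}\, t^{2} \left(\beta_{\max} - \beta_{\min}\right) - \tfrac{1}{2}\, t\, \beta_{\min},
\qquad
\sigma(t) = \sqrt{1 - e^{2 \log \bar{\alpha}(t)}},
\qquad
\beta(t) = \beta_{\min} + t \left(\beta_{\max} - \beta_{\min}\right).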
'''simple docstring''' import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py UpperCamelCase_ = "." # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) UpperCamelCase_ = [ "Assert", "AssignVariableOp", "EmptyTensorList", "MergeV2Checkpoints", "ReadVariableOp", "ResourceGather", "RestoreV2", "SaveV2", "ShardedFilename", "StatefulPartitionedCall", "StaticRegexFullMatch", "VarHandleOp", ] def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: Dict ,__UpperCamelCase: Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE : int = SavedModel() SCREAMING_SNAKE_CASE : str = [] with open(os.path.join(__UpperCamelCase ,'utils' ,'tf_ops' ,'onnx.json' ) ) as f: SCREAMING_SNAKE_CASE : List[Any] = json.load(__UpperCamelCase )['opsets'] for i in range(1 ,opset + 1 ): onnx_ops.extend(onnx_opsets[str(__UpperCamelCase )] ) with open(__UpperCamelCase ,'rb' ) as f: saved_model.ParseFromString(f.read() ) SCREAMING_SNAKE_CASE : Dict = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want SCREAMING_SNAKE_CASE : Optional[int] = sorted(__UpperCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(__UpperCamelCase ) if strict and len(__UpperCamelCase ) > 0: raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + incompatible_ops ) elif len(__UpperCamelCase ) > 0: print(f"Found the following incompatible ops for the opset {opset}:" ) print(*__UpperCamelCase ,sep='\n' ) else: print(f"The saved model {saved_model_path} can properly be converted with ONNX." ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).") parser.add_argument( "--opset", default=1_2, type=int, help="The ONNX opset against which the model has to be tested." ) parser.add_argument( "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model." ) parser.add_argument( "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)" ) UpperCamelCase_ = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
code_codestyle: 28
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase_ = { "vocab_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt", "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-german-cased": ( "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json" ), "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json" ), }, } UpperCamelCase_ = { "distilbert-base-uncased": 5_1_2, "distilbert-base-uncased-distilled-squad": 5_1_2, "distilbert-base-cased": 5_1_2, "distilbert-base-cased-distilled-squad": 5_1_2, "distilbert-base-german-cased": 5_1_2, "distilbert-base-multilingual-cased": 5_1_2, } UpperCamelCase_ = { "distilbert-base-uncased": {"do_lower_case": True}, "distilbert-base-uncased-distilled-squad": {"do_lower_case": True}, "distilbert-base-cased": {"do_lower_case": False}, "distilbert-base-cased-distilled-squad": {"do_lower_case": False}, "distilbert-base-german-cased": {"do_lower_case": False}, "distilbert-base-multilingual-cased": {"do_lower_case": False}, } class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[Any] = VOCAB_FILES_NAMES A : Dict = PRETRAINED_VOCAB_FILES_MAP A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : Optional[Any] = PRETRAINED_INIT_CONFIGURATION A : Optional[int] = ['''input_ids''', '''attention_mask'''] A : List[Any] = DistilBertTokenizer def __init__( self, A=None, A=None, A=True, A="[UNK]", A="[SEP]", A="[PAD]", A="[CLS]", A="[MASK]", A=True, A=None, **A, ): '''simple docstring''' super().__init__( A, tokenizer_file=A, do_lower_case=A, unk_token=A, sep_token=A, pad_token=A, cls_token=A, mask_token=A, tokenize_chinese_chars=A, strip_accents=A, **A, ) SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase', A ) != do_lower_case or normalizer_state.get('strip_accents', A ) != strip_accents or normalizer_state.get('handle_chinese_chars', A ) != tokenize_chinese_chars ): SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(A, 
normalizer_state.pop('type' ) ) SCREAMING_SNAKE_CASE : Optional[Any] = do_lower_case SCREAMING_SNAKE_CASE : List[str] = strip_accents SCREAMING_SNAKE_CASE : List[str] = tokenize_chinese_chars SCREAMING_SNAKE_CASE : Dict = normalizer_class(**A ) SCREAMING_SNAKE_CASE : Union[str, Any] = do_lower_case def UpperCamelCase_ ( self, A, A=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id] SCREAMING_SNAKE_CASE : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self._tokenizer.model.save(A, name=A ) return tuple(A )
style_context_codestyle: 28
label: 1
def get_set_bits_count(number: int) -> int:
    """Count the set bits in a non-negative integer."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at the next set bit (next 1) instead of looping
        # through each bit and checking for 1s, so the loop runs once per `1`
        # rather than 32 times.
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 28
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 UpperCamelCase_ = get_tests_dir("fixtures") class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = mock.Mock() SCREAMING_SNAKE_CASE : List[Any] = 500 SCREAMING_SNAKE_CASE : Optional[Any] = {} SCREAMING_SNAKE_CASE : Any = HTTPError SCREAMING_SNAKE_CASE : Any = {} # Download this model to make sure it's in the cache. SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request', return_value=A ) as mock_head: SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' ) # This check we did call the fake head request mock_head.assert_called() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = ViTImageProcessor.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' ) def UpperCamelCase_ ( self ): '''simple docstring''' with self.assertRaises(A ): # config is in subfolder, the following should not work without specifying the subfolder SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants' ) SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained( 'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor' ) self.assertIsNotNone(A ) @is_staging_test class _a ( unittest.TestCase ): '''simple docstring''' @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = TOKEN HfFolder.save_token(A ) @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' try: delete_repo(token=cls._token, repo_id='test-image-processor' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='test-dynamic-image-processor' ) except HTTPError: pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub('test-image-processor', use_auth_token=self._token ) SCREAMING_SNAKE_CASE : int = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) # Reset repo delete_repo(token=self._token, repo_id='test-image-processor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A, repo_id='test-image-processor', push_to_hub=A, use_auth_token=self._token ) SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 
ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token ) SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('valid_org/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) # Reset repo delete_repo(token=self._token, repo_id='valid_org/test-image-processor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A, repo_id='valid_org/test-image-processor-org', push_to_hub=A, use_auth_token=self._token ) SCREAMING_SNAKE_CASE : Dict = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) def UpperCamelCase_ ( self ): '''simple docstring''' CustomImageProcessor.register_for_auto_class() SCREAMING_SNAKE_CASE : Tuple = CustomImageProcessor.from_pretrained(A ) image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map, {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'}, ) SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained( F"{USER}/test-dynamic-image-processor", trust_remote_code=A ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor' )
style_context_codestyle: 28
label: 1
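A worked trace (illustration only) of the `number &= number - 1` trick used by the bit-count code earlier in this row: each iteration clears the lowest set bit, so the loop body runs exactly once per set bit.

n = 0b1101  # 13, which has three set bits
trace = []
while n:
    n &= n - 1          # clear the lowest set bit
    trace.append(bin(n))
print(trace)  # ['0b1100', '0b1000', '0b0'] -> three iterations, three set bits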
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Dict = '''char''' A : Any = '''bpe''' A : Dict = '''wp''' UpperCamelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[Any] = ['''image_processor''', '''char_tokenizer'''] A : int = '''ViTImageProcessor''' A : List[str] = '''MgpstrTokenizer''' def __init__( self, A=None, A=None, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.', A, ) SCREAMING_SNAKE_CASE : str = kwargs.pop('feature_extractor' ) SCREAMING_SNAKE_CASE : Optional[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained('gpt2' ) SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-uncased' ) super().__init__(A, A ) def __call__( self, A=None, A=None, A=None, **A ): '''simple docstring''' if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' ) if images is not None: SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor(A, return_tensors=A, **A ) if text is not None: SCREAMING_SNAKE_CASE : int = self.char_tokenizer(A, return_tensors=A, **A ) if text is None: return inputs elif images is None: return encodings else: SCREAMING_SNAKE_CASE : Any = encodings['input_ids'] return inputs def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = sequences SCREAMING_SNAKE_CASE : List[str] = char_preds.size(0 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self._decode_helper(A, 'char' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self._decode_helper(A, 'bpe' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._decode_helper(A, 'wp' ) SCREAMING_SNAKE_CASE : Optional[Any] = [] SCREAMING_SNAKE_CASE : Tuple = [] for i in range(A ): SCREAMING_SNAKE_CASE : str = [char_scores[i], bpe_scores[i], wp_scores[i]] SCREAMING_SNAKE_CASE : Dict = [char_strs[i], bpe_strs[i], wp_strs[i]] SCREAMING_SNAKE_CASE : List[str] = scores.index(max(A ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) SCREAMING_SNAKE_CASE : List[Any] = {} SCREAMING_SNAKE_CASE : int = final_strs SCREAMING_SNAKE_CASE : Any = final_scores SCREAMING_SNAKE_CASE : Dict = char_strs SCREAMING_SNAKE_CASE : Any = bpe_strs SCREAMING_SNAKE_CASE : Union[str, Any] = wp_strs return out def UpperCamelCase_ ( self, A, A ): '''simple docstring''' if format == DecodeType.CHARACTER: SCREAMING_SNAKE_CASE : List[Any] = self.char_decode SCREAMING_SNAKE_CASE : Optional[int] = 1 SCREAMING_SNAKE_CASE : str = '[s]' elif format == DecodeType.BPE: SCREAMING_SNAKE_CASE : str = self.bpe_decode SCREAMING_SNAKE_CASE : str = 2 SCREAMING_SNAKE_CASE : List[str] = '#' elif format == 
DecodeType.WORDPIECE: SCREAMING_SNAKE_CASE : Any = self.wp_decode SCREAMING_SNAKE_CASE : Tuple = 102 SCREAMING_SNAKE_CASE : List[Any] = '[SEP]' else: raise ValueError(F"Format {format} is not supported." ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = [], [] SCREAMING_SNAKE_CASE : Union[str, Any] = pred_logits.size(0 ) SCREAMING_SNAKE_CASE : Any = pred_logits.size(1 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = pred_logits.topk(1, dim=-1, largest=A, sorted=A ) SCREAMING_SNAKE_CASE : Optional[int] = preds_index.view(-1, A )[:, 1:] SCREAMING_SNAKE_CASE : List[Any] = decoder(A ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.softmax(A, dim=2 ).max(dim=2 ) SCREAMING_SNAKE_CASE : Dict = preds_max_prob[:, 1:] for index in range(A ): SCREAMING_SNAKE_CASE : Optional[int] = preds_str[index].find(A ) SCREAMING_SNAKE_CASE : List[Any] = preds_str[index][:pred_eos] SCREAMING_SNAKE_CASE : Dict = preds_index[index].cpu().tolist() SCREAMING_SNAKE_CASE : Union[str, Any] = pred_index.index(A ) if eos_token in pred_index else -1 SCREAMING_SNAKE_CASE : Optional[int] = preds_max_prob[index][: pred_eos_index + 1] SCREAMING_SNAKE_CASE : Optional[int] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(A ) conf_scores.append(A ) return dec_strs, conf_scores def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [seq.replace(' ', '' ) for seq in self.char_tokenizer.batch_decode(A )] return decode_strs def UpperCamelCase_ ( self, A ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [seq.replace(' ', '' ) for seq in self.wp_tokenizer.batch_decode(A )] return decode_strs
code_codestyle: 28
class Node:
    """A binary search tree node."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """In-order traversal, appending visited values to res."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sort a list by inserting into a BST and reading it back in order."""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
style_context_codestyle: 28
label: 1
'''simple docstring''' import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' @register_to_config def __init__( self, *, A = 4, A = 768, A, A, ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.zeros(A ) ) # parameters for additional clip time embeddings SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(A, A ) SCREAMING_SNAKE_CASE : Optional[int] = nn.Linear(A, A ) # parameters for encoder hidden states SCREAMING_SNAKE_CASE : List[Any] = clip_extra_context_tokens SCREAMING_SNAKE_CASE : str = nn.Linear( A, self.clip_extra_context_tokens * cross_attention_dim ) SCREAMING_SNAKE_CASE : int = nn.Linear(A, A ) SCREAMING_SNAKE_CASE : int = nn.LayerNorm(A ) def UpperCamelCase_ ( self, *, A, A, A, A ): '''simple docstring''' if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings SCREAMING_SNAKE_CASE : Optional[Any] = image_embeddings.shape[0] SCREAMING_SNAKE_CASE : Union[str, Any] = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) SCREAMING_SNAKE_CASE : Dict = classifier_free_guidance_embeddings.expand( A, -1 ) SCREAMING_SNAKE_CASE : List[str] = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] SCREAMING_SNAKE_CASE : Union[str, Any] = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... SCREAMING_SNAKE_CASE : Any = self.embedding_proj(A ) SCREAMING_SNAKE_CASE : List[Any] = self.clip_image_embeddings_project_to_time_embeddings(A ) SCREAMING_SNAKE_CASE : List[Any] = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" SCREAMING_SNAKE_CASE : Tuple = self.clip_extra_context_tokens_proj(A ) SCREAMING_SNAKE_CASE : Optional[int] = clip_extra_context_tokens.reshape(A, -1, self.clip_extra_context_tokens ) SCREAMING_SNAKE_CASE : int = clip_extra_context_tokens.permute(0, 2, 1 ) SCREAMING_SNAKE_CASE : Dict = self.encoder_hidden_states_proj(A ) SCREAMING_SNAKE_CASE : Optional[Any] = self.text_encoder_hidden_states_norm(A ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
code_codestyle: 28
'''simple docstring''' import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def lowercase__( *__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Optional[Union[Dict, Any]] = None ,__UpperCamelCase: Dict=True ,__UpperCamelCase: List[Any]=2 ): """simple docstring""" from .. import __version__ SCREAMING_SNAKE_CASE : int = take_from SCREAMING_SNAKE_CASE : Optional[int] = () if not isinstance(args[0] ,__UpperCamelCase ): SCREAMING_SNAKE_CASE : List[str] = (args,) for attribute, version_name, message in args: if version.parse(version.parse(__UpperCamelCase ).base_version ) >= version.parse(__UpperCamelCase ): raise ValueError( f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'" f" version {__version__} is >= {version_name}" ) SCREAMING_SNAKE_CASE : Tuple = None if isinstance(__UpperCamelCase ,__UpperCamelCase ) and attribute in deprecated_kwargs: values += (deprecated_kwargs.pop(__UpperCamelCase ),) SCREAMING_SNAKE_CASE : Dict = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}." elif hasattr(__UpperCamelCase ,__UpperCamelCase ): values += (getattr(__UpperCamelCase ,__UpperCamelCase ),) SCREAMING_SNAKE_CASE : Optional[int] = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}." elif deprecated_kwargs is None: SCREAMING_SNAKE_CASE : Dict = f"`{attribute}` is deprecated and will be removed in version {version_name}." if warning is not None: SCREAMING_SNAKE_CASE : Dict = warning + ' ' if standard_warn else '' warnings.warn(warning + message ,__UpperCamelCase ,stacklevel=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) and len(__UpperCamelCase ) > 0: SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.getouterframes(inspect.currentframe() )[1] SCREAMING_SNAKE_CASE : Any = call_frame.filename SCREAMING_SNAKE_CASE : Tuple = call_frame.lineno SCREAMING_SNAKE_CASE : Union[str, Any] = call_frame.function SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = next(iter(deprecated_kwargs.items() ) ) raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" ) if len(__UpperCamelCase ) == 0: return elif len(__UpperCamelCase ) == 1: return values[0] return values
style_context_codestyle: 28
label: 1
from typing import Any


def mode(input_list: list) -> list[Any]:
    """Return the mode(s) of input_list as a sorted list."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 28
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ = { "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"], "tokenization_roformer": ["RoFormerTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["RoFormerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "RoFormerForCausalLM", "RoFormerForMaskedLM", "RoFormerForMultipleChoice", "RoFormerForQuestionAnswering", "RoFormerForSequenceClassification", "RoFormerForTokenClassification", "RoFormerLayer", "RoFormerModel", "RoFormerPreTrainedModel", "load_tf_weights_in_roformer", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRoFormerForCausalLM", "TFRoFormerForMaskedLM", "TFRoFormerForMultipleChoice", "TFRoFormerForQuestionAnswering", "TFRoFormerForSequenceClassification", "TFRoFormerForTokenClassification", "TFRoFormerLayer", "TFRoFormerModel", "TFRoFormerPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxRoFormerForMaskedLM", "FlaxRoFormerForMultipleChoice", "FlaxRoFormerForQuestionAnswering", "FlaxRoFormerForSequenceClassification", "FlaxRoFormerForTokenClassification", "FlaxRoFormerModel", "FlaxRoFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, 
FlaxRoFormerPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
28
1
'''simple docstring'''
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factors of ``n`` in ascending order (with multiplicity)."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
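A quick hand-verified sanity check for prime_factors() as fixed above:

# 360 = 2**3 * 3**2 * 5; a prime is its own factorization; 1 has no prime factors.
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
assert prime_factors(97) == [97]
assert prime_factors(1) == []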
28
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-based position of the highest set bit of a non-negative int."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
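As a cross-check, the position computed above equals the bit length of a non-negative integer, so it should agree with Python's built-in int.bit_length (non-negative input is assumed; a negative number would loop forever under arithmetic right shift):

for n in (0, 1, 18, 255, 256):
    assert get_highest_set_bit_position(n) == n.bit_length()
# e.g. 18 == 0b10010, so its highest set bit sits at position 5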
28
1
'''simple docstring'''
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return the longest non-decreasing subsequence of ``array``."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
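A small worked example for longest_subsequence(), traced by hand against the fixed code above:

# Keeping the pivot 3 only allows [3, 4]; dropping it allows [1, 2, 4], which is longer.
assert longest_subsequence([3, 1, 2, 4]) == [1, 2, 4]
assert longest_subsequence([5]) == [5]  # single element: the recursion stop condition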
28
'''simple docstring''' from typing import Dict from .base import GenericTensor, Pipeline class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def UpperCamelCase_ ( self, A=None, A=None, A=None, **A ): '''simple docstring''' if tokenize_kwargs is None: SCREAMING_SNAKE_CASE : Optional[int] = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( 'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' ) SCREAMING_SNAKE_CASE : Tuple = truncation SCREAMING_SNAKE_CASE : int = tokenize_kwargs SCREAMING_SNAKE_CASE : Optional[Any] = {} if return_tensors is not None: SCREAMING_SNAKE_CASE : Optional[int] = return_tensors return preprocess_params, {}, postprocess_params def UpperCamelCase_ ( self, A, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.framework SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(A, return_tensors=A, **A ) return model_inputs def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.model(**A ) return model_outputs def UpperCamelCase_ ( self, A, A=False ): '''simple docstring''' if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self, *A, **A ): '''simple docstring''' return super().__call__(*A, **A )
28
1
'''simple docstring''' from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging UpperCamelCase_ = logging.get_logger(__name__) def lowercase__( __UpperCamelCase: Any ,__UpperCamelCase: List[Any] ): """simple docstring""" try: with open(__UpperCamelCase ,'rb' ) as flax_state_f: SCREAMING_SNAKE_CASE : Any = from_bytes(__UpperCamelCase ,flax_state_f.read() ) except UnpicklingError as e: try: with open(__UpperCamelCase ) as f: if f.read().startswith('version' ): raise OSError( 'You seem to have cloned a repository without having git-lfs installed. Please' ' install git-lfs and run `git lfs install` followed by `git lfs pull` in the' ' folder you cloned.' ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. " ) return load_flax_weights_in_pytorch_model(__UpperCamelCase ,__UpperCamelCase ) def lowercase__( __UpperCamelCase: Tuple ,__UpperCamelCase: Optional[int] ): """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( 'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see' ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation' ' instructions.' ) raise # check if we have bf16 weights SCREAMING_SNAKE_CASE : Optional[int] = flatten_dict(jax.tree_util.tree_map(lambda __UpperCamelCase : x.dtype == jnp.bfloataa ,__UpperCamelCase ) ).values() if any(__UpperCamelCase ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( 'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ' 'before loading those in PyTorch model.' ) SCREAMING_SNAKE_CASE : List[str] = jax.tree_util.tree_map( lambda __UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params ,__UpperCamelCase ) SCREAMING_SNAKE_CASE : Any = '' SCREAMING_SNAKE_CASE : List[Any] = flatten_dict(__UpperCamelCase ,sep='.' ) SCREAMING_SNAKE_CASE : int = pt_model.state_dict() # keep track of unexpected & missing keys SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : str = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): SCREAMING_SNAKE_CASE : Tuple = flax_key_tuple.split('.' 
) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: SCREAMING_SNAKE_CASE : Any = flax_key_tuple_array[:-1] + ['weight'] SCREAMING_SNAKE_CASE : List[str] = jnp.transpose(__UpperCamelCase ,(3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": SCREAMING_SNAKE_CASE : int = flax_key_tuple_array[:-1] + ['weight'] SCREAMING_SNAKE_CASE : List[Any] = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": SCREAMING_SNAKE_CASE : Tuple = flax_key_tuple_array[:-1] + ['weight'] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(__UpperCamelCase ): SCREAMING_SNAKE_CASE : Union[str, Any] = ( flax_key_tuple_string.replace('_0' ,'.0' ) .replace('_1' ,'.1' ) .replace('_2' ,'.2' ) .replace('_3' ,'.3' ) .replace('_4' ,'.4' ) .replace('_5' ,'.5' ) .replace('_6' ,'.6' ) .replace('_7' ,'.7' ) .replace('_8' ,'.8' ) .replace('_9' ,'.9' ) ) SCREAMING_SNAKE_CASE : List[str] = '.'.join(__UpperCamelCase ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected " f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." ) else: # add weight to pytorch dict SCREAMING_SNAKE_CASE : Tuple = np.asarray(__UpperCamelCase ) if not isinstance(__UpperCamelCase ,np.ndarray ) else flax_tensor SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(__UpperCamelCase ) # remove from missing keys missing_keys.remove(__UpperCamelCase ) else: # weight is not expected by PyTorch model unexpected_keys.append(__UpperCamelCase ) pt_model.load_state_dict(__UpperCamelCase ) # re-transform missing_keys to list SCREAMING_SNAKE_CASE : List[Any] = list(__UpperCamelCase ) if len(__UpperCamelCase ) > 0: logger.warning( 'Some weights of the Flax model were not used when initializing the PyTorch model' f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing" f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture" ' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This' f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect" ' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a' ' FlaxBertForSequenceClassification model).' ) if len(__UpperCamelCase ) > 0: logger.warning( f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly" f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to" ' use it for predictions and inference.' ) return pt_model
28
'''simple docstring'''
from __future__ import annotations

import queue


class TreeNode:
    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None


def build_tree() -> TreeNode:
    """Interactively build a binary tree in level order; entering 'n' stops the input."""
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise  # unreachable: the loop always returns once the input stops


def pre_order(node: TreeNode) -> None:
    """Root, then left subtree, then right subtree."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    """Left subtree, then root, then right subtree."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    """Left subtree, then right subtree, then root."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    """Breadth-first traversal using a queue."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """Breadth-first traversal printing one level per line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    """Return ``s`` centered in a ``width``-wide banner of ``char``."""
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
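To see the traversal orders without going through the interactive build_tree() prompt, a tiny hand-built tree suffices (illustrative only):

#        1
#       / \
#      2   3
root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)
pre_order(root)    # prints: 1,2,3,
in_order(root)     # prints: 2,1,3,
post_order(root)   # prints: 2,3,1,
level_order(root)  # prints: 1,2,3,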
28
1
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
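A minimal sketch of what the deprecation shim above guarantees: constructing the old class still works but emits a FutureWarning (the import assumes an installed transformers package, since the module itself uses relative imports):

import warnings

from transformers import DeiTFeatureExtractor  # the shim defined above, as shipped in transformers

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = DeiTFeatureExtractor()  # behaves exactly like DeiTImageProcessor
assert any(issubclass(w.category, FutureWarning) for w in caught)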
28
'''simple docstring''' import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class _a : '''simple docstring''' def __init__( self, A = "cpu", A = "openai/clip-vit-large-patch14" ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = device SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizerFast.from_pretrained(A ) SCREAMING_SNAKE_CASE : int = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] SCREAMING_SNAKE_CASE : str = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] SCREAMING_SNAKE_CASE : Dict = torchvision.transforms.Normalize(self.image_mean, self.image_std ) SCREAMING_SNAKE_CASE : List[str] = torchvision.transforms.Resize(224 ) SCREAMING_SNAKE_CASE : List[Any] = torchvision.transforms.CenterCrop(224 ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.resize(A ) SCREAMING_SNAKE_CASE : Any = self.center_crop(A ) SCREAMING_SNAKE_CASE : str = self.normalize(A ) return images def __call__( self, A=None, A=None, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.tokenizer(text=A, **A ) SCREAMING_SNAKE_CASE : Tuple = self.preprocess_img(A ) SCREAMING_SNAKE_CASE : List[str] = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class _a ( nn.Module ): '''simple docstring''' def __init__( self, A=10, A=0.01, A=None, A=None, A=None, A=None, A=None, A=None, A=False, A=True, A="image", A=True, A=False, A=False, A=False, ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : List[Any] = device if device else get_device() if vqgan: SCREAMING_SNAKE_CASE : Optional[Any] = vqgan else: SCREAMING_SNAKE_CASE : Tuple = load_vqgan(self.device, conf_path=A, ckpt_path=A ) self.vqgan.eval() if clip: SCREAMING_SNAKE_CASE : List[str] = clip else: SCREAMING_SNAKE_CASE : Any = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' ) self.clip.to(self.device ) SCREAMING_SNAKE_CASE : Optional[int] = ProcessorGradientFlow(device=self.device ) SCREAMING_SNAKE_CASE : Optional[int] = iterations SCREAMING_SNAKE_CASE : Tuple = lr SCREAMING_SNAKE_CASE : Tuple = log SCREAMING_SNAKE_CASE : str = make_grid SCREAMING_SNAKE_CASE : Dict = return_val SCREAMING_SNAKE_CASE : Union[str, Any] = quantize SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decoder.z_shape def UpperCamelCase_ ( self, A=None, A=None, A=5, A=True ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [] if output_path is None: SCREAMING_SNAKE_CASE : int = './animation.gif' if input_path is None: SCREAMING_SNAKE_CASE : Optional[int] = self.save_path SCREAMING_SNAKE_CASE : Optional[Any] = sorted(glob(input_path + '/*' ) ) if not len(A ): raise ValueError( 'No images found in save path, aborting (did you pass save_intermediate=True to the generate' ' function?)' ) if len(A ) == 1: print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' ) SCREAMING_SNAKE_CASE : Optional[Any] = total_duration / len(A ) SCREAMING_SNAKE_CASE : int = [frame_duration] * len(A ) if extend_frames: SCREAMING_SNAKE_CASE : List[str] = 1.5 SCREAMING_SNAKE_CASE : int = 3 for file_name in paths: if file_name.endswith('.png' ): images.append(imageio.imread(A ) ) imageio.mimsave(A, A, duration=A ) print(F"gif 
saved to {output_path}" ) def UpperCamelCase_ ( self, A=None, A=None ): '''simple docstring''' if not (path or img): raise ValueError('Input either path or tensor' ) if img is not None: raise NotImplementedError SCREAMING_SNAKE_CASE : str = preprocess(Image.open(A ), target_image_size=256 ).to(self.device ) SCREAMING_SNAKE_CASE : Any = preprocess_vqgan(A ) SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : Tuple = self.vqgan.encode(A ) return z def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.latent.detach().requires_grad_() SCREAMING_SNAKE_CASE : Union[str, Any] = base_latent + transform_vector if self.quantize: SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.quantize(A ) else: SCREAMING_SNAKE_CASE : Optional[Any] = trans_latent return self.vqgan.decode(A ) def UpperCamelCase_ ( self, A, A, A=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.clip_preprocessor(text=A, images=A, return_tensors='pt', padding=A ) SCREAMING_SNAKE_CASE : str = self.clip(**A ) SCREAMING_SNAKE_CASE : Any = clip_outputs.logits_per_image if weights is not None: SCREAMING_SNAKE_CASE : List[Any] = similarity_logits * weights return similarity_logits.sum() def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_clip_similarity(pos_prompts['prompts'], A, weights=(1 / pos_prompts['weights']) ) if neg_prompts: SCREAMING_SNAKE_CASE : List[Any] = self._get_clip_similarity(neg_prompts['prompts'], A, weights=neg_prompts['weights'] ) else: SCREAMING_SNAKE_CASE : str = torch.tensor([1], device=self.device ) SCREAMING_SNAKE_CASE : List[Any] = -torch.log(A ) + torch.log(A ) return loss def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = torch.randn_like(self.latent, requires_grad=A, device=self.device ) SCREAMING_SNAKE_CASE : Optional[int] = torch.optim.Adam([vector], lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() SCREAMING_SNAKE_CASE : Union[str, Any] = self._add_vector(A ) SCREAMING_SNAKE_CASE : Dict = loop_post_process(A ) SCREAMING_SNAKE_CASE : List[str] = self._get_CLIP_loss(A, A, A ) print('CLIP loss', A ) if self.log: wandb.log({'CLIP Loss': clip_loss} ) clip_loss.backward(retain_graph=A ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' wandb.init(reinit=A, project='face-editor' ) wandb.config.update({'Positive Prompts': positive_prompts} ) wandb.config.update({'Negative Prompts': negative_prompts} ) wandb.config.update({'lr': self.lr, 'iterations': self.iterations} ) if image_path: SCREAMING_SNAKE_CASE : Tuple = Image.open(A ) SCREAMING_SNAKE_CASE : int = image.resize((256, 256) ) wandb.log('Original Image', wandb.Image(A ) ) def UpperCamelCase_ ( self, A ): '''simple docstring''' if not prompts: return [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Dict = [] if isinstance(A, A ): SCREAMING_SNAKE_CASE : Union[str, Any] = [prompt.strip() for prompt in prompts.split('|' )] for prompt in prompts: if isinstance(A, (tuple, list) ): SCREAMING_SNAKE_CASE : List[str] = prompt[0] SCREAMING_SNAKE_CASE : Any = float(prompt[1] ) elif ":" in prompt: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = prompt.split(':' ) SCREAMING_SNAKE_CASE : Any = float(A ) else: SCREAMING_SNAKE_CASE : Dict = prompt SCREAMING_SNAKE_CASE : List[Any] = 1.0 processed_prompts.append(A ) weights.append(A ) return { 
"prompts": processed_prompts, "weights": torch.tensor(A, device=self.device ), } def UpperCamelCase_ ( self, A, A=None, A=None, A=True, A=False, A=True, A=True, A=None, ): '''simple docstring''' if image_path: SCREAMING_SNAKE_CASE : int = self._get_latent(A ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(self.latent_dim, device=self.device ) if self.log: self._init_logging(A, A, A ) assert pos_prompts, "You must provide at least one positive prompt." SCREAMING_SNAKE_CASE : Dict = self.process_prompts(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.process_prompts(A ) if save_final and save_path is None: SCREAMING_SNAKE_CASE : Optional[int] = os.path.join('./outputs/', '_'.join(pos_prompts['prompts'] ) ) if not os.path.exists(A ): os.makedirs(A ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = save_path + '_' + get_timestamp() os.makedirs(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = save_path SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decode(self.latent )[0] if show_intermediate: print('Original Image' ) show_pil(custom_to_pil(A ) ) SCREAMING_SNAKE_CASE : int = loop_post_process(A ) for iter, transformed_img in enumerate(self._optimize_CLIP(A, A, A ) ): if show_intermediate: show_pil(A ) if save_intermediate: transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}.png" ) ) if self.log: wandb.log({'Image': wandb.Image(A )} ) if show_final: show_pil(A ) if save_final: transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}_final.png" ) )
28
1
'''simple docstring''' import copy import random from transformers import CLIPTokenizer class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self, *A, **A ): '''simple docstring''' super().__init__(*A, **A ) SCREAMING_SNAKE_CASE : Tuple = {} def UpperCamelCase_ ( self, A, *A, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = super().add_tokens(A, *A, **A ) if num_added_tokens == 0: raise ValueError( F"The tokenizer already contains the token {placeholder_token}. Please pass a different" ' `placeholder_token` that is not already in the tokenizer.' ) def UpperCamelCase_ ( self, A, *A, A=1, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [] if num_vec_per_token == 1: self.try_adding_tokens(A, *A, **A ) output.append(A ) else: SCREAMING_SNAKE_CASE : Dict = [] for i in range(A ): SCREAMING_SNAKE_CASE : str = placeholder_token + F"_{i}" self.try_adding_tokens(A, *A, **A ) output.append(A ) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( F"The tokenizer already has placeholder token {token} that can get confused with" F" {placeholder_token}keep placeholder tokens independent" ) SCREAMING_SNAKE_CASE : Dict = output def UpperCamelCase_ ( self, A, A=False, A=1.0 ): '''simple docstring''' if isinstance(A, A ): SCREAMING_SNAKE_CASE : List[Any] = [] for i in range(len(A ) ): output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=A ) ) return output for placeholder_token in self.token_map: if placeholder_token in text: SCREAMING_SNAKE_CASE : Tuple = self.token_map[placeholder_token] SCREAMING_SNAKE_CASE : int = tokens[: 1 + int(len(A ) * prop_tokens_to_load )] if vector_shuffle: SCREAMING_SNAKE_CASE : Union[str, Any] = copy.copy(A ) random.shuffle(A ) SCREAMING_SNAKE_CASE : Dict = text.replace(A, ' '.join(A ) ) return text def __call__( self, A, *A, A=False, A=1.0, **A ): '''simple docstring''' return super().__call__( self.replace_placeholder_tokens_in_text( A, vector_shuffle=A, prop_tokens_to_load=A ), *A, **A, ) def UpperCamelCase_ ( self, A, *A, A=False, A=1.0, **A ): '''simple docstring''' return super().encode( self.replace_placeholder_tokens_in_text( A, vector_shuffle=A, prop_tokens_to_load=A ), *A, **A, )
28
'''simple docstring''' import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self, A ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Dict = nn.ModuleList(A ) def UpperCamelCase_ ( self, A, A, A, A, A, A = None, A = None, A = None, A = None, A = False, A = True, ): '''simple docstring''' for i, (image, scale, controlnet) in enumerate(zip(A, A, self.nets ) ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = controlnet( A, A, A, A, A, A, A, A, A, A, A, ) # merge samples if i == 0: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = down_samples, mid_sample else: SCREAMING_SNAKE_CASE : str = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(A, A ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def UpperCamelCase_ ( self, A, A = True, A = None, A = False, A = None, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 0 SCREAMING_SNAKE_CASE : Optional[int] = save_directory for controlnet in self.nets: controlnet.save_pretrained( A, is_main_process=A, save_function=A, safe_serialization=A, variant=A, ) idx += 1 SCREAMING_SNAKE_CASE : List[Any] = model_path_to_save + F"_{idx}" @classmethod def UpperCamelCase_ ( cls, A, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 0 SCREAMING_SNAKE_CASE : List[Any] = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... SCREAMING_SNAKE_CASE : Optional[Any] = pretrained_model_path while os.path.isdir(A ): SCREAMING_SNAKE_CASE : Optional[int] = ControlNetModel.from_pretrained(A, **A ) controlnets.append(A ) idx += 1 SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + F"_{idx}" logger.info(F"{len(A )} controlnets loaded from {pretrained_model_path}." ) if len(A ) == 0: raise ValueError( F"No ControlNets found under {os.path.dirname(A )}. Expected at least {pretrained_model_path + '_0'}." ) return cls(A )
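For context, a hedged usage sketch of the multi-ControlNet wrapper implemented above; the hub ids are illustrative and the import path may vary across diffusers versions:

# Illustrative only -- combine two ControlNets so their down/mid residuals are merged.
from diffusers import ControlNetModel
from diffusers.pipelines.controlnet import MultiControlNetModel  # import path: assumption

canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")        # hub id: assumption
openpose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")  # hub id: assumption
multi = MultiControlNetModel([canny, openpose])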
28
1
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """Read the dataset, flip every image with its YOLO boxes, and save the results."""
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str):
    """Collect image paths and their YOLO-format boxes from the given directories."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1):
    """Flip every image (and mirror its boxes) according to ``flip_type``."""
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:  # horizontal flip: mirror x_center
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:  # vertical flip: mirror y_center
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """Return a random lowercase-alphanumeric string of length ``number_char``."""
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
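The annotation update above relies on the YOLO convention that a box is (class, x_center, y_center, width, height) with coordinates normalized to [0, 1], so a horizontal flip only mirrors the x coordinate (hand-checked sketch):

bbox = [0, 0.25, 0.40, 0.10, 0.20]           # class 0, centered at (0.25, 0.40)
flipped = [bbox[0], 1 - bbox[1], *bbox[2:]]  # mirror x_center, keep everything else
assert flipped == [0, 0.75, 0.40, 0.10, 0.20]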
28
'''simple docstring''' from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging UpperCamelCase_ = logging.get_logger(__name__) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : str = ['''audio_values''', '''audio_mask'''] def __init__( self, A=2_048, A=1, A=[16, 16], A=128, A=44_100, A=86, A=2_048, A=0.0, **A, ): '''simple docstring''' super().__init__( feature_size=A, sampling_rate=A, padding_value=A, **A, ) SCREAMING_SNAKE_CASE : str = spectrogram_length SCREAMING_SNAKE_CASE : Optional[Any] = num_channels SCREAMING_SNAKE_CASE : List[str] = patch_size SCREAMING_SNAKE_CASE : Optional[int] = feature_size // self.patch_size[1] SCREAMING_SNAKE_CASE : Dict = n_fft SCREAMING_SNAKE_CASE : Tuple = sampling_rate // hop_length_to_sampling_rate SCREAMING_SNAKE_CASE : str = sampling_rate SCREAMING_SNAKE_CASE : int = padding_value SCREAMING_SNAKE_CASE : Any = mel_filter_bank( num_frequency_bins=1 + n_fft // 2, num_mel_filters=A, min_frequency=0.0, max_frequency=2_20_50.0, sampling_rate=A, norm='slaney', mel_scale='slaney', ).T def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = spectrogram( A, window_function(self.n_fft, 'hann' ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0, ) SCREAMING_SNAKE_CASE : Union[str, Any] = log_spec[:, :-1] SCREAMING_SNAKE_CASE : List[Any] = log_spec - 20.0 SCREAMING_SNAKE_CASE : Optional[Any] = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0 return log_spec def __call__( self, A, A = None, A = True, A = None, A = False, A = False, **A, ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( 'This feature extractor is set to support sampling rate' F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled" F" with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) SCREAMING_SNAKE_CASE : List[Any] = isinstance(A, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"Only mono-channel audio is supported for input to {self}" ) SCREAMING_SNAKE_CASE : int = is_batched_numpy or ( isinstance(A, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(A, np.ndarray ): SCREAMING_SNAKE_CASE : Any = np.asarray(A, dtype=np.floataa ) elif isinstance(A, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): SCREAMING_SNAKE_CASE : Optional[Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis SCREAMING_SNAKE_CASE : int = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0], A ): SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(A, dtype=np.floataa ) for feature in audio_features] # Create audio attention mask SCREAMING_SNAKE_CASE : Tuple = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: SCREAMING_SNAKE_CASE : List[Any] = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] SCREAMING_SNAKE_CASE : Tuple = np.array(A ).astype(np.floataa ) # convert into correct format for padding SCREAMING_SNAKE_CASE : Tuple = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch SCREAMING_SNAKE_CASE : Optional[Any] = np.ones([len(A ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) SCREAMING_SNAKE_CASE : Optional[int] = padded_audio_features * self.padding_value for i in range(len(A ) ): SCREAMING_SNAKE_CASE : Optional[int] = audio_features[i] SCREAMING_SNAKE_CASE : Union[str, Any] = feature # return as BatchFeature if return_attention_mask: SCREAMING_SNAKE_CASE : Any = {'audio_values': padded_audio_features, 'audio_mask': audio_mask} else: SCREAMING_SNAKE_CASE : Dict = {'audio_values': padded_audio_features} SCREAMING_SNAKE_CASE : str = BatchFeature(data=A, tensor_type=A ) return encoded_inputs
28
1
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json", # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Dict = '''deformable_detr''' A : List[str] = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self, A=True, A=None, A=3, A=300, A=1_024, A=6, A=1_024, A=8, A=6, A=1_024, A=8, A=0.0, A=True, A="relu", A=256, A=0.1, A=0.0, A=0.0, A=0.02, A=1.0, A=True, A=False, A="sine", A="resnet50", A=True, A=False, A=4, A=4, A=4, A=False, A=300, A=False, A=1, A=5, A=2, A=1, A=1, A=5, A=2, A=0.1, A=0.25, A=False, **A, ): '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' ) if not use_timm_backbone: if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' ) SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAPPING['resnet'](out_features=['stage4'] ) elif isinstance(A, A ): SCREAMING_SNAKE_CASE : Any = backbone_config.get('model_type' ) SCREAMING_SNAKE_CASE : Union[str, Any] = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE : Optional[Any] = config_class.from_dict(A ) SCREAMING_SNAKE_CASE : List[Any] = use_timm_backbone SCREAMING_SNAKE_CASE : Dict = backbone_config SCREAMING_SNAKE_CASE : List[str] = num_channels SCREAMING_SNAKE_CASE : Optional[Any] = num_queries SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings SCREAMING_SNAKE_CASE : int = d_model SCREAMING_SNAKE_CASE : List[str] = encoder_ffn_dim SCREAMING_SNAKE_CASE : List[str] = encoder_layers SCREAMING_SNAKE_CASE : Tuple = encoder_attention_heads SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_ffn_dim SCREAMING_SNAKE_CASE : Tuple = decoder_layers SCREAMING_SNAKE_CASE : Dict = decoder_attention_heads SCREAMING_SNAKE_CASE : List[Any] = dropout SCREAMING_SNAKE_CASE : Union[str, Any] = attention_dropout SCREAMING_SNAKE_CASE : Optional[int] = activation_dropout SCREAMING_SNAKE_CASE : int = activation_function SCREAMING_SNAKE_CASE : Optional[int] = init_std SCREAMING_SNAKE_CASE : List[str] = init_xavier_std SCREAMING_SNAKE_CASE : str = encoder_layerdrop SCREAMING_SNAKE_CASE : Dict = auxiliary_loss SCREAMING_SNAKE_CASE : Optional[Any] = position_embedding_type SCREAMING_SNAKE_CASE : List[str] = backbone SCREAMING_SNAKE_CASE : Any = use_pretrained_backbone SCREAMING_SNAKE_CASE : Dict = dilation # deformable attributes SCREAMING_SNAKE_CASE : List[str] = num_feature_levels SCREAMING_SNAKE_CASE : Dict = encoder_n_points SCREAMING_SNAKE_CASE : Optional[int] = decoder_n_points SCREAMING_SNAKE_CASE : List[str] = two_stage SCREAMING_SNAKE_CASE : Optional[int] = two_stage_num_proposals SCREAMING_SNAKE_CASE : Any = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError('If two_stage is True, with_box_refine must be True.' 
) # Hungarian matcher SCREAMING_SNAKE_CASE : Optional[Any] = class_cost SCREAMING_SNAKE_CASE : Any = bbox_cost SCREAMING_SNAKE_CASE : Tuple = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE : Optional[int] = mask_loss_coefficient SCREAMING_SNAKE_CASE : List[str] = dice_loss_coefficient SCREAMING_SNAKE_CASE : Optional[int] = bbox_loss_coefficient SCREAMING_SNAKE_CASE : Tuple = giou_loss_coefficient SCREAMING_SNAKE_CASE : Optional[int] = eos_coefficient SCREAMING_SNAKE_CASE : Tuple = focal_alpha SCREAMING_SNAKE_CASE : Optional[Any] = disable_custom_kernels super().__init__(is_encoder_decoder=A, **A ) @property def UpperCamelCase_ ( self ): '''simple docstring''' return self.encoder_attention_heads @property def UpperCamelCase_ ( self ): '''simple docstring''' return self.d_model def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: SCREAMING_SNAKE_CASE : Optional[int] = self.backbone_config.to_dict() SCREAMING_SNAKE_CASE : Optional[Any] = self.__class__.model_type return output
28
'''simple docstring'''
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    """Prim's algorithm should recover the known MST of a small 9-node graph."""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
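For reference, a minimal sketch of the adjacency construction used in the test above: each undirected edge is inserted in both directions.

from collections import defaultdict

adjacency = defaultdict(list)
for node1, node2, cost in [[0, 1, 4], [1, 2, 8]]:
    adjacency[node1].append([node2, cost])  # forward direction
    adjacency[node2].append([node1, cost])  # mirror entry, since the graph is undirected

assert dict(adjacency) == {0: [[1, 4]], 1: [[0, 4], [2, 8]], 2: [[1, 8]]}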
28
1
'''simple docstring''' import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : List[str] = GPTSanJapaneseTokenizer A : Optional[Any] = False A : List[Any] = {'''do_clean_text''': False, '''add_prefix_space''': False} def UpperCamelCase_ ( self ): '''simple docstring''' super().setUp() # fmt: off SCREAMING_SNAKE_CASE : Union[str, Any] = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>'] # fmt: on SCREAMING_SNAKE_CASE : Optional[Any] = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀 SCREAMING_SNAKE_CASE : Tuple = {'unk_token': '<unk>'} SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] ) SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['emoji_file'] ) with open(self.vocab_file, 'w', encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) with open(self.emoji_file, 'w' ) as emoji_writer: emoji_writer.write(json.dumps(A ) ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 'こんにちは、世界。 \nこんばんは、㔺界。😀' SCREAMING_SNAKE_CASE : Union[str, Any] = 'こんにちは、世界。 \nこんばんは、世界。😀' return input_text, output_text def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.get_input_output_texts(A ) SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(A, clean_up_tokenization_spaces=A ) return text, ids def UpperCamelCase_ ( self ): '''simple docstring''' pass # TODO add if relevant def UpperCamelCase_ ( self ): '''simple docstring''' pass # TODO add if relevant def UpperCamelCase_ ( self ): '''simple docstring''' pass # TODO add if relevant def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer() # Testing tokenization SCREAMING_SNAKE_CASE : int = 'こんにちは、世界。 こんばんは、㔺界。' SCREAMING_SNAKE_CASE : str = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。'] SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize(A ) self.assertListEqual(A, A ) # Testing conversion to ids without special tokens SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual(A, A ) # Testing conversion to ids with special tokens SCREAMING_SNAKE_CASE : Union[str, Any] = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE : Optional[int] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual(A, A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.get_tokenizer() # Testing tokenization SCREAMING_SNAKE_CASE : int = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。' SCREAMING_SNAKE_CASE : 
List[str] = 'こんにちは、、、、世界。こんばんは、、、、世界。' SCREAMING_SNAKE_CASE : str = tokenizer.encode(A ) SCREAMING_SNAKE_CASE : str = tokenizer.decode(A ) self.assertEqual(A, A ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) # Testing tokenization SCREAMING_SNAKE_CASE : List[Any] = 'こんにちは、世界。' SCREAMING_SNAKE_CASE : Dict = 'こんばんは、㔺界。😀' SCREAMING_SNAKE_CASE : List[Any] = 'こんにちは、世界。こんばんは、世界。😀' SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(prefix_text + input_text ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode('', prefix_text=prefix_text + input_text ) SCREAMING_SNAKE_CASE : int = tokenizer.encode(A, prefix_text=A ) SCREAMING_SNAKE_CASE : List[str] = tokenizer.decode(A ) SCREAMING_SNAKE_CASE : Any = tokenizer.decode(A ) SCREAMING_SNAKE_CASE : Any = tokenizer.decode(A ) self.assertEqual(A, A ) self.assertEqual(A, A ) self.assertEqual(A, A ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) # Testing tokenization SCREAMING_SNAKE_CASE : Optional[Any] = 'こんにちは、世界。' SCREAMING_SNAKE_CASE : Dict = 'こんばんは、㔺界。😀' SCREAMING_SNAKE_CASE : str = len(tokenizer.encode(A ) ) - 2 SCREAMING_SNAKE_CASE : Optional[Any] = len(tokenizer.encode(A ) ) - 2 SCREAMING_SNAKE_CASE : Union[str, Any] = [1] + [0] * (len_prefix + len_text + 1) SCREAMING_SNAKE_CASE : Optional[Any] = [1] * (len_prefix + len_text + 1) + [0] SCREAMING_SNAKE_CASE : List[str] = [1] + [1] * (len_prefix) + [0] * (len_text + 1) SCREAMING_SNAKE_CASE : List[Any] = tokenizer(prefix_text + input_text ).token_type_ids SCREAMING_SNAKE_CASE : Tuple = tokenizer('', prefix_text=prefix_text + input_text ).token_type_ids SCREAMING_SNAKE_CASE : List[str] = tokenizer(A, prefix_text=A ).token_type_ids self.assertListEqual(A, A ) self.assertListEqual(A, A ) self.assertListEqual(A, A ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) SCREAMING_SNAKE_CASE : str = tokenizer.encode('あンいワ' ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode('', prefix_text='あンいワ' ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode('いワ', prefix_text='あン' ) self.assertEqual(tokenizer.decode(A ), tokenizer.decode(A ) ) self.assertEqual(tokenizer.decode(A ), tokenizer.decode(A ) ) self.assertNotEqual(A, A ) self.assertNotEqual(A, A ) self.assertEqual(x_token_a[1], x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1], x_token_a[3] ) # SEG token @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) SCREAMING_SNAKE_CASE : Any = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']] SCREAMING_SNAKE_CASE : Any = tokenizer(A, padding=A ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.batch_encode_plus(A, padding=A ) # fmt: off SCREAMING_SNAKE_CASE : int = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]] SCREAMING_SNAKE_CASE : str = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] SCREAMING_SNAKE_CASE : List[Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids, A ) self.assertListEqual(x_token.token_type_ids, A ) self.assertListEqual(x_token.attention_mask, A ) self.assertListEqual(x_token_a.input_ids, A ) 
self.assertListEqual(x_token_a.token_type_ids, A ) self.assertListEqual(x_token_a.attention_mask, A ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' pass
28
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : int = StableDiffusionDiffEditPipeline A : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''} A : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''} A : str = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess A : Union[str, Any] = frozenset([] ) def UpperCamelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=A, ) SCREAMING_SNAKE_CASE : int = DDIMScheduler( beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_one=A, ) SCREAMING_SNAKE_CASE : str = DDIMInverseScheduler( beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_zero=A, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Tuple = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, ) SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(A ) SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) SCREAMING_SNAKE_CASE : int = { 'unet': unet, 'scheduler': scheduler, 'inverse_scheduler': inverse_scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 16, 16), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(A ) ).to(A ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = { 'prompt': 'a dog and a newt', 'mask_image': mask, 'image_latents': latents, 'generator': generator, 
'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : Any = image.cpu().permute(0, 2, 3, 1 )[0] SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(A ) ).convert('RGB' ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : int = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Dict = { 'image': image, 'source_prompt': 'a cat and a frog', 'target_prompt': 'a dog and a newt', 'generator': generator, 'num_inference_steps': 2, 'num_maps_per_mask': 2, 'mask_encode_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0, 2, 3, 1 )[0] SCREAMING_SNAKE_CASE : int = Image.fromarray(np.uinta(A ) ).convert('RGB' ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Any = { 'image': image, 'prompt': 'a cat and a frog', 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'decode_latents': True, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' if not hasattr(self.pipeline_class, '_optional_components' ): return SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components() SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(A, A, A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Dict = pipe(**A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(A ) SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class.from_pretrained(A ) pipe_loaded.to(A ) pipe_loaded.set_progress_bar_config(disable=A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(A, A ) is None, F"`{optional_component}` did not stay set to None after loading.", ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Tuple = pipe_loaded(**A )[0] SCREAMING_SNAKE_CASE : List[str] = np.abs(output - output_loaded ).max() self.assertLess(A, 1E-4 ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = 'cpu' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : str = self.get_dummy_mask_inputs(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.generate_mask(**A ) SCREAMING_SNAKE_CASE : Dict = mask[0, -3:, -3:] self.assertEqual(mask.shape, (1, 16, 16) ) SCREAMING_SNAKE_CASE : Any = np.array([0] * 9 ) SCREAMING_SNAKE_CASE : Any = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) self.assertEqual(mask[0, -3, -4], 0 ) def UpperCamelCase_ ( self ): 
'''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 'cpu' SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components() SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A ) SCREAMING_SNAKE_CASE : Optional[Any] = pipe.invert(**A ).images SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3) ) SCREAMING_SNAKE_CASE : Tuple = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], ) SCREAMING_SNAKE_CASE : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=5E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 'cpu' SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Dict = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'} SCREAMING_SNAKE_CASE : Union[str, Any] = DPMSolverMultistepScheduler(**A ) SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepInverseScheduler(**A ) SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A ) SCREAMING_SNAKE_CASE : List[str] = pipe.invert(**A ).images SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3) ) SCREAMING_SNAKE_CASE : Tuple = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], ) SCREAMING_SNAKE_CASE : Any = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) @require_torch_gpu @slow class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' ) SCREAMING_SNAKE_CASE : Optional[int] = raw_image.convert('RGB' ).resize((768, 768) ) SCREAMING_SNAKE_CASE : List[str] = raw_image def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE : int = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : List[Any] = 'a bowl of fruit' SCREAMING_SNAKE_CASE : List[str] = 'a bowl of pears' SCREAMING_SNAKE_CASE : Dict = pipe.generate_mask( image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, ) SCREAMING_SNAKE_CASE : Optional[int] = pipe.invert( prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A ).latents SCREAMING_SNAKE_CASE : List[str] = pipe( prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, output_type='numpy', ).images[0] SCREAMING_SNAKE_CASE : List[Any] = ( np.array( load_image( 
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1 def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : int = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : str = 'a bowl of fruit' SCREAMING_SNAKE_CASE : Tuple = 'a bowl of pears' SCREAMING_SNAKE_CASE : List[Any] = pipe.generate_mask( image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, ) SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.invert( prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A, num_inference_steps=25, ).latents SCREAMING_SNAKE_CASE : str = pipe( prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy', ).images[0] SCREAMING_SNAKE_CASE : Tuple = ( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1
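The slow tests above drive the full DiffEdit flow end to end. As a reference, here is a minimal sketch of the same three-stage call pattern (mask generation, DDIM inversion, masked denoising), assuming the diffusers DiffEdit API exercised by these tests; the image URL and prompts are the ones the tests use:

import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
).resize((768, 768))
# 1) find where to edit, 2) invert the unedited content into latents, 3) apply the edit
mask = pipe.generate_mask(image=image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears")
latents = pipe.invert(prompt="a bowl of fruit", image=image, inpaint_strength=0.7).latents
edited = pipe(
    prompt="a bowl of pears", mask_image=mask, image_latents=latents, inpaint_strength=0.7
).images[0]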
"""Convert an original OpenAI GPT TensorFlow checkpoint to a PyTorch model directory."""

import argparse

import torch

from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct the model config, falling back to the defaults if no JSON file is given
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from the numpy checkpoint files
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save the PyTorch model weights and configuration
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--openai_checkpoint_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the TensorFlow checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--openai_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_openai_checkpoint_to_pytorch(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
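A hypothetical programmatic call equivalent to the CLI entry point above; both paths are placeholders, and an empty config string falls back to the default OpenAIGPTConfig:

convert_openai_checkpoint_to_pytorch(
    openai_checkpoint_folder_path="./openai-gpt-tf-checkpoint",  # placeholder path
    openai_config_file="",
    pytorch_dump_folder_path="./openai-gpt-pytorch",  # placeholder path
)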
"""Sum Euler's totient phi(n) for 2 <= n <= limit, using a sieve over the primes."""


def solution(limit: int = 1_000_000) -> int:
    # phi[i] starts at i - 1; for each prime p, subtract phi[j] // p from every multiple j of p.
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # phi[i] is untouched so far, so i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
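A quick brute-force cross-check of the sieve for a small limit, counting integers coprime to n directly with math.gcd; useful before trusting the sieve at 10**6:

import math


def phi_naive(n: int) -> int:
    """Euler's totient by direct counting; O(n log n) per call, fine for small n."""
    return sum(1 for k in range(1, n + 1) if math.gcd(n, k) == 1)


assert sum(phi_naive(n) for n in range(2, 101)) == solution(100)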
from collections.abc import Callable

import numpy as np


def heun(
    ode_func: Callable, y0: float, x0: float, x_end: float, step_size: float
) -> np.ndarray:
    """Integrate y' = ode_func(x, y) from x0 to x_end with Heun's (improved Euler) method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Euler predictor, then trapezoidal corrector averaging the slopes at both ends.
        predictor = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, predictor)
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
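Usage sketch for the integrator above: y' = y from x = 0 to 1 has the exact endpoint value e, and Heun's method is second order, so halving the step size should cut the error roughly fourfold:

import numpy as np

ys = heun(lambda x, y: y, y0=1.0, x0=0.0, x_end=1.0, step_size=0.01)
print(abs(ys[-1] - np.e))  # O(step_size**2): roughly 1e-5 at this step size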
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : str = LongformerTokenizer A : List[str] = True A : Optional[int] = LongformerTokenizerFast A : Tuple = True def UpperCamelCase_ ( self ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE : Any = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(A, range(len(A ) ) ) ) SCREAMING_SNAKE_CASE : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] SCREAMING_SNAKE_CASE : Tuple = {'unk_token': '<unk>'} SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] ) SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file, 'w', encoding='utf-8' ) as fp: fp.write(json.dumps(A ) + '\n' ) with open(self.merges_file, 'w', encoding='utf-8' ) as fp: fp.write('\n'.join(A ) ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = 'lower newer' SCREAMING_SNAKE_CASE : Union[str, Any] = 'lower newer' return input_text, output_text def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map ) SCREAMING_SNAKE_CASE : Optional[Any] = 'lower newer' SCREAMING_SNAKE_CASE : List[str] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(A ) # , add_prefix_space=True) self.assertListEqual(A, A ) SCREAMING_SNAKE_CASE : List[Any] = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ), A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=A ), [0, 31_414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode('Hello world! 
cécé herlolip 418', add_special_tokens=A ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('sequence builders', add_special_tokens=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('multi-sequence build', add_special_tokens=A ) SCREAMING_SNAKE_CASE : int = tokenizer.encode( 'sequence builders', add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode( 'sequence builders', 'multi-sequence build', add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A, A ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Optional[int] = 'Encode this sequence.' SCREAMING_SNAKE_CASE : List[str] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(A, A ) SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(A, A ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(A, A ) # Testing spaces after special tokens SCREAMING_SNAKE_CASE : Optional[int] = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(A, lstrip=A, rstrip=A )} ) # mask token has a left space SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A ) SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask> sequence' SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask>sequence' SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(A ) SCREAMING_SNAKE_CASE : Tuple = encoded.index(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(A, A ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = encoded.index(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(A, A ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(A, **A ) SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(A, **A ) SCREAMING_SNAKE_CASE : Optional[Any] = 'A, <mask> AllenNLP sentence.' 
SCREAMING_SNAKE_CASE : Any = tokenizer_r.encode_plus(A, add_special_tokens=A, return_token_type_ids=A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(A, add_special_tokens=A, return_token_type_ids=A ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), ) SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def UpperCamelCase_ ( self ): '''simple docstring''' for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ): SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'], A ) self.assertEqual(post_processor_state['add_prefix_space'], A ) self.assertEqual(post_processor_state['trim_offsets'], A ) def UpperCamelCase_ ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): SCREAMING_SNAKE_CASE : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` SCREAMING_SNAKE_CASE : Tuple = F"{text_of_1_token} {text_of_1_token}" SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) 
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Any = F" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[str] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : str = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
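All of the offset-mapping cases above come down to how the byte-level BPE pre-tokenizer attaches the leading space ("Ġ") to a token; a small sketch inspecting that directly with the fast tokenizer from the slow test (assuming, as the tests do, that "hello" is a single vocabulary token):

from transformers import LongformerTokenizerFast

tok = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096")
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
print(enc.tokens())           # ['hello', 'Ġhello']: the second token carries the space marker
print(enc["offset_mapping"])  # with the default trim_offsets=True, the space is excluded from the second span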
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Return one shortest path from start to goal as a list of nodes ([] if none exists)."""
    explored = set()
    queue = [[start]]  # each queue entry is a full path still to be extended
    if start == goal:
        return [start]
    while queue:
        path = queue.pop(0)
        node = path[-1]
        if node not in explored:
            # extend the path by every neighbour, returning as soon as the goal appears
            for neighbour in graph[node]:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                if neighbour == goal:
                    return new_path
            explored.add(node)
    return []  # no path between the two nodes


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on a shortest start->target path (-1 if unreachable)."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from the `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
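queue.pop(0) on a plain list is O(n); a drop-in variant of the distance search using collections.deque keeps every queue operation O(1) while returning the same answer on demo_graph:

from collections import deque


def bfs_shortest_path_distance_deque(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    dist = {start: 0}
    queue = deque([start])
    while queue:
        node = queue.popleft()
        for adjacent in graph[node]:
            if adjacent not in dist:
                dist[adjacent] = dist[node] + 1
                if adjacent == target:
                    return dist[adjacent]
                queue.append(adjacent)
    return -1


print(bfs_shortest_path_distance_deque(demo_graph, "G", "D"))  # 4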
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Union[str, Any] = StableDiffusionXLImgaImgPipeline A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} A : str = PipelineTesterMixin.required_optional_params - {'''latents'''} A : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS A : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS A : int = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCamelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), attention_head_dim=(2, 4), use_linear_projection=A, addition_embed_type='text_time', addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, ) SCREAMING_SNAKE_CASE : str = EulerDiscreteScheduler( beta_start=0.0_00_85, beta_end=0.0_12, steps_offset=1, beta_schedule='scaled_linear', timestep_spacing='leading', ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[str] = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=32, ) SCREAMING_SNAKE_CASE : int = CLIPTextModel(A ) SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A ) SCREAMING_SNAKE_CASE : Optional[int] = CLIPTextModelWithProjection(A ) SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A ) SCREAMING_SNAKE_CASE : List[str] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'text_encoder_2': text_encoder_a, 'tokenizer_2': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : str = image / 2 + 0.5 if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : List[Any] = { 'prompt': 'A painting of a squirrel eating a burger', 
'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 5.0, 'output_type': 'numpy', 'strength': 0.75, } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE : str = self.get_dummy_components() SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionXLImgaImgPipeline(**A ) SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Any = sd_pipe(**A ).images SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE : List[Any] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase_ ( self ): '''simple docstring''' super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : List[str] = StableDiffusionXLImgaImgPipeline(**A ) SCREAMING_SNAKE_CASE : str = sd_pipe.to(A ) SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) # forward without prompt embeds SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Optional[Any] = 3 * ['this is a negative prompt'] SCREAMING_SNAKE_CASE : Optional[int] = negative_prompt SCREAMING_SNAKE_CASE : Optional[int] = 3 * [inputs['prompt']] SCREAMING_SNAKE_CASE : int = sd_pipe(**A ) SCREAMING_SNAKE_CASE : List[Any] = output.images[0, -3:, -3:, -1] # forward with prompt embeds SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : str = 3 * ['this is a negative prompt'] SCREAMING_SNAKE_CASE : int = 3 * [inputs.pop('prompt' )] ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) : Optional[Any] = sd_pipe.encode_prompt(A, negative_prompt=A ) SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe( **A, prompt_embeds=A, negative_prompt_embeds=A, pooled_prompt_embeds=A, negative_pooled_prompt_embeds=A, ) SCREAMING_SNAKE_CASE : Optional[int] = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @slow @require_torch_gpu class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self, A, A="cpu", A=torch.floataa, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Optional[Any] = np.random.RandomState(A ).standard_normal((1, 4, 64, 64) ) SCREAMING_SNAKE_CASE : str = torch.from_numpy(A ).to(device=A, dtype=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = { 'prompt': 'a photograph of an astronaut riding a horse', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 
DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Optional[Any] = self.get_inputs(A ) SCREAMING_SNAKE_CASE : str = pipe(**A ).images SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE : Dict = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] ) assert np.abs(image_slice - expected_slice ).max() < 7E-3
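For contrast with the dummy-component tests above, a hedged end-to-end sketch of the same img2img entry point against the public SDXL base checkpoint (the checkpoint and image URL are illustrative choices, not taken from these tests):

import torch
from diffusers import StableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
).resize((1024, 1024))
out = pipe(prompt="a bowl of pears", image=init_image, strength=0.75, guidance_scale=5.0).images[0]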
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
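The module above routes everything through _LazyModule so that torch and vision dependencies are only imported on first use; a simplified sketch of the same idea using PEP 562's module-level __getattr__ (the submodule names echo the structure above, but this is a standalone illustration, not the transformers implementation):

import importlib

_lazy_attrs = {
    "EfficientNetConfig": ".configuration_efficientnet",
    "EfficientNetModel": ".modeling_efficientnet",
}


def __getattr__(name):
    # Import the owning submodule on first attribute access, then cache the result.
    if name in _lazy_attrs:
        module = importlib.import_module(_lazy_attrs[name], __package__)
        value = getattr(module, name)
        globals()[name] = value
        return value
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")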
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Dict = '''char''' A : Any = '''bpe''' A : Dict = '''wp''' UpperCamelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[Any] = ['''image_processor''', '''char_tokenizer'''] A : int = '''ViTImageProcessor''' A : List[str] = '''MgpstrTokenizer''' def __init__( self, A=None, A=None, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.', A, ) SCREAMING_SNAKE_CASE : str = kwargs.pop('feature_extractor' ) SCREAMING_SNAKE_CASE : Optional[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained('gpt2' ) SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-uncased' ) super().__init__(A, A ) def __call__( self, A=None, A=None, A=None, **A ): '''simple docstring''' if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' ) if images is not None: SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor(A, return_tensors=A, **A ) if text is not None: SCREAMING_SNAKE_CASE : int = self.char_tokenizer(A, return_tensors=A, **A ) if text is None: return inputs elif images is None: return encodings else: SCREAMING_SNAKE_CASE : Any = encodings['input_ids'] return inputs def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = sequences SCREAMING_SNAKE_CASE : List[str] = char_preds.size(0 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self._decode_helper(A, 'char' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self._decode_helper(A, 'bpe' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._decode_helper(A, 'wp' ) SCREAMING_SNAKE_CASE : Optional[Any] = [] SCREAMING_SNAKE_CASE : Tuple = [] for i in range(A ): SCREAMING_SNAKE_CASE : str = [char_scores[i], bpe_scores[i], wp_scores[i]] SCREAMING_SNAKE_CASE : Dict = [char_strs[i], bpe_strs[i], wp_strs[i]] SCREAMING_SNAKE_CASE : List[str] = scores.index(max(A ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) SCREAMING_SNAKE_CASE : List[Any] = {} SCREAMING_SNAKE_CASE : int = final_strs SCREAMING_SNAKE_CASE : Any = final_scores SCREAMING_SNAKE_CASE : Dict = char_strs SCREAMING_SNAKE_CASE : Any = bpe_strs SCREAMING_SNAKE_CASE : Union[str, Any] = wp_strs return out def UpperCamelCase_ ( self, A, A ): '''simple docstring''' if format == DecodeType.CHARACTER: SCREAMING_SNAKE_CASE : List[Any] = self.char_decode SCREAMING_SNAKE_CASE : Optional[int] = 1 SCREAMING_SNAKE_CASE : str = '[s]' elif format == DecodeType.BPE: SCREAMING_SNAKE_CASE : str = self.bpe_decode SCREAMING_SNAKE_CASE : str = 2 SCREAMING_SNAKE_CASE : List[str] = '#' elif format == 
DecodeType.WORDPIECE: SCREAMING_SNAKE_CASE : Any = self.wp_decode SCREAMING_SNAKE_CASE : Tuple = 102 SCREAMING_SNAKE_CASE : List[Any] = '[SEP]' else: raise ValueError(F"Format {format} is not supported." ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = [], [] SCREAMING_SNAKE_CASE : Union[str, Any] = pred_logits.size(0 ) SCREAMING_SNAKE_CASE : Any = pred_logits.size(1 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = pred_logits.topk(1, dim=-1, largest=A, sorted=A ) SCREAMING_SNAKE_CASE : Optional[int] = preds_index.view(-1, A )[:, 1:] SCREAMING_SNAKE_CASE : List[Any] = decoder(A ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.softmax(A, dim=2 ).max(dim=2 ) SCREAMING_SNAKE_CASE : Dict = preds_max_prob[:, 1:] for index in range(A ): SCREAMING_SNAKE_CASE : Optional[int] = preds_str[index].find(A ) SCREAMING_SNAKE_CASE : List[Any] = preds_str[index][:pred_eos] SCREAMING_SNAKE_CASE : Dict = preds_index[index].cpu().tolist() SCREAMING_SNAKE_CASE : Union[str, Any] = pred_index.index(A ) if eos_token in pred_index else -1 SCREAMING_SNAKE_CASE : Optional[int] = preds_max_prob[index][: pred_eos_index + 1] SCREAMING_SNAKE_CASE : Optional[int] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(A ) conf_scores.append(A ) return dec_strs, conf_scores def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [seq.replace(' ', '' ) for seq in self.char_tokenizer.batch_decode(A )] return decode_strs def UpperCamelCase_ ( self, A ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [seq.replace(' ', '' ) for seq in self.wp_tokenizer.batch_decode(A )] return decode_strs
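The heart of batch_decode above is picking, per sample, the highest-confidence string among the character, BPE and wordpiece heads; here is the selection step isolated with plain lists (scores and strings are made up for illustration):

char_strs, bpe_strs, wp_strs = ["ride"], ["rid3"], ["ridge"]
char_scores, bpe_scores, wp_scores = [0.91], [0.64], [0.80]

final_strs = []
for i in range(len(char_strs)):
    scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
    strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
    final_strs.append(strs[scores.index(max(scores))])
print(final_strs)  # ['ride']: the character head wins for this sample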
__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
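A short usage sketch of the exported feature types, assuming the standard datasets API:

from datasets import Array2D, Dataset, Features, Value

features = Features({"matrix": Array2D(shape=(2, 2), dtype="float32"), "label": Value("int64")})
ds = Dataset.from_dict({"matrix": [[[1.0, 0.0], [0.0, 1.0]]], "label": [1]}, features=features)
print(ds.features)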
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger("transformers.models.speecht5") def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: Any ): """simple docstring""" hf_model.apply_weight_norm() SCREAMING_SNAKE_CASE : Any = checkpoint['input_conv.weight_g'] SCREAMING_SNAKE_CASE : List[Any] = checkpoint['input_conv.weight_v'] SCREAMING_SNAKE_CASE : str = checkpoint['input_conv.bias'] for i in range(len(config.upsample_rates ) ): SCREAMING_SNAKE_CASE : Optional[int] = checkpoint[f"upsamples.{i}.1.weight_g"] SCREAMING_SNAKE_CASE : Dict = checkpoint[f"upsamples.{i}.1.weight_v"] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"upsamples.{i}.1.bias"] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): SCREAMING_SNAKE_CASE : int = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"] SCREAMING_SNAKE_CASE : str = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"] SCREAMING_SNAKE_CASE : Dict = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"] SCREAMING_SNAKE_CASE : Tuple = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"] SCREAMING_SNAKE_CASE : Optional[Any] = checkpoint['output_conv.1.weight_g'] SCREAMING_SNAKE_CASE : List[Any] = checkpoint['output_conv.1.weight_v'] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint['output_conv.1.bias'] hf_model.remove_weight_norm() @torch.no_grad() def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: int ,__UpperCamelCase: Any ,__UpperCamelCase: str=None ,__UpperCamelCase: Tuple=None ,): """simple docstring""" if config_path is not None: SCREAMING_SNAKE_CASE : List[Any] = SpeechTaHifiGanConfig.from_pretrained(__UpperCamelCase ) else: SCREAMING_SNAKE_CASE : Optional[int] = SpeechTaHifiGanConfig() SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaHifiGan(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.load(__UpperCamelCase ) load_weights(orig_checkpoint['model']['generator'] ,__UpperCamelCase ,__UpperCamelCase ) SCREAMING_SNAKE_CASE : int = np.load(__UpperCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = stats[0].reshape(-1 ) SCREAMING_SNAKE_CASE : Tuple = stats[1].reshape(-1 ) SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(__UpperCamelCase ).float() SCREAMING_SNAKE_CASE : Optional[Any] = torch.from_numpy(__UpperCamelCase ).float() model.save_pretrained(__UpperCamelCase ) if repo_id: print('Pushing to the hub...' ) model.push_to_hub(__UpperCamelCase ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." 
) UpperCamelCase_ = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
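A hypothetical programmatic call mirroring the CLI entry point above; every path is a placeholder, and the arguments are passed positionally in the same order as in the __main__ block:

convert_hifigan_checkpoint(
    "./generator_checkpoint.pt",   # placeholder: original HiFi-GAN checkpoint
    "./stats.npy",                 # placeholder: mean/scale statistics
    "./speecht5-hifigan-pytorch",  # placeholder: output directory
    None,                          # use the default SpeechT5HifiGanConfig
    None,                          # do not push to the Hub
)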
'''simple docstring''' import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def lowercase__( __UpperCamelCase: Dict ,__UpperCamelCase: int=0.9_9_9 ,__UpperCamelCase: List[Any]="cosine" ,): """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(__UpperCamelCase: Optional[int] ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__UpperCamelCase: List[str] ): return math.exp(t * -1_2.0 ) else: raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}" ) SCREAMING_SNAKE_CASE : Optional[int] = [] for i in range(__UpperCamelCase ): SCREAMING_SNAKE_CASE : Optional[Any] = i / num_diffusion_timesteps SCREAMING_SNAKE_CASE : Tuple = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__UpperCamelCase ) / alpha_bar_fn(__UpperCamelCase ) ,__UpperCamelCase ) ) return torch.tensor(__UpperCamelCase ,dtype=torch.floataa ) class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' A : str = [e.name for e in KarrasDiffusionSchedulers] A : Any = 2 @register_to_config def __init__( self, A = 1_000, A = 0.0_00_85, A = 0.0_12, A = "linear", A = None, A = "epsilon", A = "linspace", A = 0, ): '''simple docstring''' if trained_betas is not None: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(A, dtype=torch.floataa ) elif beta_schedule == "linear": SCREAMING_SNAKE_CASE : Dict = torch.linspace(A, A, A, dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. SCREAMING_SNAKE_CASE : Any = ( torch.linspace(beta_start**0.5, beta_end**0.5, A, dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule SCREAMING_SNAKE_CASE : Union[str, Any] = betas_for_alpha_bar(A ) else: raise NotImplementedError(F"{beta_schedule} does is not implemented for {self.__class__}" ) SCREAMING_SNAKE_CASE : List[Any] = 1.0 - self.betas SCREAMING_SNAKE_CASE : Optional[Any] = torch.cumprod(self.alphas, dim=0 ) # set all values self.set_timesteps(A, A, A ) def UpperCamelCase_ ( self, A, A=None ): '''simple docstring''' if schedule_timesteps is None: SCREAMING_SNAKE_CASE : List[str] = self.timesteps SCREAMING_SNAKE_CASE : Tuple = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: SCREAMING_SNAKE_CASE : Union[str, Any] = 1 if len(A ) > 1 else 0 else: SCREAMING_SNAKE_CASE : Optional[int] = timestep.cpu().item() if torch.is_tensor(A ) else timestep SCREAMING_SNAKE_CASE : List[Any] = self._index_counter[timestep_int] return indices[pos].item() @property def UpperCamelCase_ ( self ): '''simple docstring''' if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def UpperCamelCase_ ( self, A, A, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.index_for_timestep(A ) if self.state_in_first_order: SCREAMING_SNAKE_CASE : List[Any] = self.sigmas[step_index] else: SCREAMING_SNAKE_CASE : List[str] = self.sigmas_interpol[step_index] SCREAMING_SNAKE_CASE : Any = sample / ((sigma**2 + 1) ** 0.5) return sample def UpperCamelCase_ ( self, A, A = None, A = None, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = num_inference_steps SCREAMING_SNAKE_CASE : Dict = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": SCREAMING_SNAKE_CASE : Dict = np.linspace(0, num_train_timesteps - 1, A, dtype=A )[::-1].copy() elif self.config.timestep_spacing == "leading": SCREAMING_SNAKE_CASE : Tuple = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 SCREAMING_SNAKE_CASE : List[str] = (np.arange(0, A ) * step_ratio).round()[::-1].copy().astype(A ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": SCREAMING_SNAKE_CASE : List[Any] = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 SCREAMING_SNAKE_CASE : Optional[int] = (np.arange(A, 0, -step_ratio )).round().copy().astype(A ) timesteps -= 1 else: raise ValueError( F"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
) SCREAMING_SNAKE_CASE : Optional[int] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(np.log(A ) ).to(A ) SCREAMING_SNAKE_CASE : List[Any] = np.interp(A, np.arange(0, len(A ) ), A ) SCREAMING_SNAKE_CASE : Tuple = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) SCREAMING_SNAKE_CASE : int = torch.from_numpy(A ).to(device=A ) # interpolate sigmas SCREAMING_SNAKE_CASE : str = sigmas.log().lerp(sigmas.roll(1 ).log(), 0.5 ).exp() SCREAMING_SNAKE_CASE : int = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) SCREAMING_SNAKE_CASE : List[Any] = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(A ).startswith('mps' ): # mps does not support float64 SCREAMING_SNAKE_CASE : int = torch.from_numpy(A ).to(A, dtype=torch.floataa ) else: SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(A ).to(A ) # interpolate timesteps SCREAMING_SNAKE_CASE : Tuple = self.sigma_to_t(A ).to(A, dtype=timesteps.dtype ) SCREAMING_SNAKE_CASE : List[Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1 ).flatten() SCREAMING_SNAKE_CASE : int = torch.cat([timesteps[:1], interleaved_timesteps] ) SCREAMING_SNAKE_CASE : Any = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter SCREAMING_SNAKE_CASE : Optional[int] = defaultdict(A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = sigma.log() # get distribution SCREAMING_SNAKE_CASE : Dict = log_sigma - self.log_sigmas[:, None] # get sigmas range SCREAMING_SNAKE_CASE : Union[str, Any] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) SCREAMING_SNAKE_CASE : Dict = low_idx + 1 SCREAMING_SNAKE_CASE : List[str] = self.log_sigmas[low_idx] SCREAMING_SNAKE_CASE : Tuple = self.log_sigmas[high_idx] # interpolate sigmas SCREAMING_SNAKE_CASE : List[str] = (low - log_sigma) / (low - high) SCREAMING_SNAKE_CASE : Union[str, Any] = w.clamp(0, 1 ) # transform interpolation to time range SCREAMING_SNAKE_CASE : Tuple = (1 - w) * low_idx + w * high_idx SCREAMING_SNAKE_CASE : Tuple = t.view(sigma.shape ) return t @property def UpperCamelCase_ ( self ): '''simple docstring''' return self.sample is None def UpperCamelCase_ ( self, A, A, A, A = True, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.index_for_timestep(A ) # advance index counter by 1 SCREAMING_SNAKE_CASE : Union[str, Any] = timestep.cpu().item() if torch.is_tensor(A ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: SCREAMING_SNAKE_CASE : List[str] = self.sigmas[step_index] SCREAMING_SNAKE_CASE : Tuple = self.sigmas_interpol[step_index + 1] SCREAMING_SNAKE_CASE : str = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method SCREAMING_SNAKE_CASE : Dict = self.sigmas[step_index - 1] SCREAMING_SNAKE_CASE : Optional[int] = self.sigmas_interpol[step_index] SCREAMING_SNAKE_CASE : List[Any] = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API SCREAMING_SNAKE_CASE : Optional[Any] = 0 SCREAMING_SNAKE_CASE : Tuple = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": SCREAMING_SNAKE_CASE : str = sigma_hat if self.state_in_first_order else sigma_interpol SCREAMING_SNAKE_CASE : Tuple = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": SCREAMING_SNAKE_CASE : Any = sigma_hat if self.state_in_first_order else sigma_interpol SCREAMING_SNAKE_CASE : Union[str, Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError('prediction_type not implemented yet: sample' ) else: raise ValueError( F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order SCREAMING_SNAKE_CASE : int = (sample - pred_original_sample) / sigma_hat # 3. delta timestep SCREAMING_SNAKE_CASE : Union[str, Any] = sigma_interpol - sigma_hat # store for 2nd order step SCREAMING_SNAKE_CASE : List[str] = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order SCREAMING_SNAKE_CASE : List[str] = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep SCREAMING_SNAKE_CASE : Any = sigma_next - sigma_hat SCREAMING_SNAKE_CASE : Tuple = self.sample SCREAMING_SNAKE_CASE : Union[str, Any] = None SCREAMING_SNAKE_CASE : List[Any] = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=A ) def UpperCamelCase_ ( self, A, A, A, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(A ): # mps does not support float64 SCREAMING_SNAKE_CASE : Optional[int] = self.timesteps.to(original_samples.device, dtype=torch.floataa ) SCREAMING_SNAKE_CASE : Optional[int] = timesteps.to(original_samples.device, dtype=torch.floataa ) else: SCREAMING_SNAKE_CASE : Optional[Any] = self.timesteps.to(original_samples.device ) SCREAMING_SNAKE_CASE : Union[str, Any] = timesteps.to(original_samples.device ) SCREAMING_SNAKE_CASE : List[str] = [self.index_for_timestep(A, A ) for t in timesteps] SCREAMING_SNAKE_CASE : Tuple = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): SCREAMING_SNAKE_CASE : Union[str, Any] = sigma.unsqueeze(-1 ) SCREAMING_SNAKE_CASE : Optional[int] = original_samples + noise * sigma return noisy_samples def __len__( self ): '''simple docstring''' return self.config.num_train_timesteps
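The interpolated sigma grid above is built by lerping in log space (sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()), which places each interpolated value at the geometric mean of consecutive sigmas; a standalone check:

import torch

sigmas = torch.tensor([8.0, 4.0, 2.0, 1.0])
interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()
print(interpol)  # entry i equals sqrt(sigmas[i] * sigmas[i - 1]); index 0 wraps around to the last sigma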
'''simple docstring''' from typing import Any class _a : '''simple docstring''' def __init__( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = data SCREAMING_SNAKE_CASE : Any = None def __repr__( self ): '''simple docstring''' return F"Node({self.data})" class _a : '''simple docstring''' def __init__( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = None def __iter__( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.head while node: yield node.data SCREAMING_SNAKE_CASE : List[str] = node.next def __len__( self ): '''simple docstring''' return sum(1 for _ in self ) def __repr__( self ): '''simple docstring''' return "->".join([str(A ) for item in self] ) def __getitem__( self, A ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self, A, A ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) SCREAMING_SNAKE_CASE : Optional[Any] = self.head for _ in range(A ): SCREAMING_SNAKE_CASE : Union[str, Any] = current.next SCREAMING_SNAKE_CASE : Any = data def UpperCamelCase_ ( self, A ): '''simple docstring''' self.insert_nth(len(self ), A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' self.insert_nth(0, A ) def UpperCamelCase_ ( self, A, A ): '''simple docstring''' if not 0 <= index <= len(self ): raise IndexError('list index out of range' ) SCREAMING_SNAKE_CASE : Union[str, Any] = Node(A ) if self.head is None: SCREAMING_SNAKE_CASE : Optional[int] = new_node elif index == 0: SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # link new_node to head SCREAMING_SNAKE_CASE : Tuple = new_node else: SCREAMING_SNAKE_CASE : Optional[int] = self.head for _ in range(index - 1 ): SCREAMING_SNAKE_CASE : str = temp.next SCREAMING_SNAKE_CASE : Union[str, Any] = temp.next SCREAMING_SNAKE_CASE : List[str] = new_node def UpperCamelCase_ ( self ): # print every node data '''simple docstring''' print(self ) def UpperCamelCase_ ( self ): '''simple docstring''' return self.delete_nth(0 ) def UpperCamelCase_ ( self ): # delete from tail '''simple docstring''' return self.delete_nth(len(self ) - 1 ) def UpperCamelCase_ ( self, A = 0 ): '''simple docstring''' if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('List index out of range.' ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # default first node if index == 0: SCREAMING_SNAKE_CASE : List[str] = self.head.next else: SCREAMING_SNAKE_CASE : Union[str, Any] = self.head for _ in range(index - 1 ): SCREAMING_SNAKE_CASE : Any = temp.next SCREAMING_SNAKE_CASE : List[str] = temp.next SCREAMING_SNAKE_CASE : Optional[int] = temp.next.next return delete_node.data def UpperCamelCase_ ( self ): '''simple docstring''' return self.head is None def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = None SCREAMING_SNAKE_CASE : Any = self.head while current: # Store the current node's next node. 
SCREAMING_SNAKE_CASE : Optional[int] = current.next # Make the current node's next point backwards SCREAMING_SNAKE_CASE : int = prev # Make the previous node be the current node SCREAMING_SNAKE_CASE : int = current # Make the current node the next node (to progress iteration) SCREAMING_SNAKE_CASE : List[Any] = next_node # Return prev in order to put the head at the end SCREAMING_SNAKE_CASE : List[Any] = prev def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = LinkedList() assert linked_list.is_empty() is True assert str(__UpperCamelCase ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(10 ): assert len(__UpperCamelCase ) == i linked_list.insert_nth(__UpperCamelCase ,i + 1 ) assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(0 ,12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(__UpperCamelCase ) == 9 assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 ,9 ) ) is True for i in range(0 ,9 ): SCREAMING_SNAKE_CASE : Any = -i assert all(linked_list[i] == -i for i in range(0 ,9 ) ) is True linked_list.reverse() assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(-8 ,1 ) ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [ -9, 1_00, Node(77_34_51_12 ), 'dlrow olleH', 7, 55_55, 0, -1_9_2.5_5_5_5_5, 'Hello, world!', 7_7.9, Node(10 ), None, None, 1_2.2_0, ] SCREAMING_SNAKE_CASE : Optional[int] = LinkedList() for i in test_input: linked_list.insert_tail(__UpperCamelCase ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(__UpperCamelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head SCREAMING_SNAKE_CASE : str = linked_list.delete_head() assert result == -9 assert ( str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail SCREAMING_SNAKE_CASE : Dict = linked_list.delete_tail() assert result == 1_2.2 assert ( str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list SCREAMING_SNAKE_CASE : str = linked_list.delete_nth(10 ) assert result is None assert ( str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('Hello again, world!' 
) ) assert ( str(__UpperCamelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(__UpperCamelCase ) assert ( str(__UpperCamelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(__UpperCamelCase ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def lowercase__( ): """simple docstring""" from doctest import testmod testmod() SCREAMING_SNAKE_CASE : Dict = LinkedList() linked_list.insert_head(input('Inserting 1st at head ' ).strip() ) linked_list.insert_head(input('Inserting 2nd at head ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() ) linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() print('\nDelete head' ) linked_list.delete_head() print('Delete tail' ) linked_list.delete_tail() print('\nPrint list:' ) linked_list.print_list() print('\nReverse linked list' ) linked_list.reverse() print('\nPrint list:' ) linked_list.print_list() print('\nString representation of linked list:' ) print(__UpperCamelCase ) print('\nReading/changing Node data using indexing:' ) print(f"Element at Position 1: {linked_list[1]}" ) SCREAMING_SNAKE_CASE : str = input('Enter New Value: ' ).strip() print('New list:' ) print(__UpperCamelCase ) print(f"length of linked_list is : {len(__UpperCamelCase )}" ) if __name__ == "__main__": main()
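A compact interactive-style session with the LinkedList above, complementing the assertion-based tests:

ll = LinkedList()
for value in (1, 2, 3):
    ll.insert_tail(value)
ll.insert_head(0)
print(ll)              # 0->1->2->3
ll.reverse()
print(ll)              # 3->2->1->0
print(len(ll), ll[1])  # 4 2  (indexing yields the node's data)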
'''simple docstring''' import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) UpperCamelCase_ = [ "cross_validation.py", "gradient_accumulation.py", "local_sgd.py", "multi_process_metrics.py", "memory.py", "automatic_gradient_accumulation.py", "fsdp_with_peak_mem_tracking.py", "deepspeed_with_config_support.py", "megatron_lm_gpt_pretraining.py", ] class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self, A, A, A = None, A = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : str = os.path.abspath(os.path.join('examples', 'by_feature' ) ) SCREAMING_SNAKE_CASE : str = os.path.abspath('examples' ) for item in os.listdir(A ): if item not in EXCLUDE_EXAMPLES: SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(A, A ) if os.path.isfile(A ) and ".py" in item_path: with self.subTest( tested_script=A, feature_script=A, tested_section='main()' if parser_only else 'training_function()', ): SCREAMING_SNAKE_CASE : str = compare_against_test( os.path.join(A, A ), A, A, A ) SCREAMING_SNAKE_CASE : Union[str, Any] = '\n'.join(A ) if special_strings is not None: for string in special_strings: SCREAMING_SNAKE_CASE : int = diff.replace(A, '' ) self.assertEqual(A, '' ) def UpperCamelCase_ ( self ): '''simple docstring''' self.one_complete_example('complete_nlp_example.py', A ) self.one_complete_example('complete_nlp_example.py', A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = os.path.abspath(os.path.join('examples', 'cv_example.py' ) ) SCREAMING_SNAKE_CASE : Dict = [ ' ' * 16 + '{\n\n', ' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n', ' ' * 20 + '"f1": eval_metric["f1"],\n\n', ' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n', ' ' * 20 + '"epoch": epoch,\n\n', ' ' * 16 + '},\n\n', ' ' * 16 + 'step=epoch,\n', ' ' * 12, ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n', ] self.one_complete_example('complete_cv_example.py', A, A, A ) self.one_complete_example('complete_cv_example.py', A, A, A ) @mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} ) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[Any] = False @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' super().setUpClass() SCREAMING_SNAKE_CASE : Optional[Any] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(cls._tmpdir, 'default_config.yml' ) write_basic_config(save_location=cls.configPath ) SCREAMING_SNAKE_CASE : Any = ['accelerate', 'launch', '--config_file', cls.configPath] @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' super().tearDownClass() shutil.rmtree(cls._tmpdir ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'epoch_0' ) ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 
F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split() SCREAMING_SNAKE_CASE : Tuple = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'step_2' ) ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, 'epoch_0' )}\n ".split() SCREAMING_SNAKE_CASE : List[str] = run_command(self._launch_args + testargs, return_stdout=A ) self.assertNotIn('epoch 0:', A ) self.assertIn('epoch 1:', A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, 'step_2' )}\n ".split() SCREAMING_SNAKE_CASE : Optional[Any] = run_command(self._launch_args + testargs, return_stdout=A ) if torch.cuda.is_available(): SCREAMING_SNAKE_CASE : Optional[Any] = torch.cuda.device_count() else: SCREAMING_SNAKE_CASE : Any = 1 if num_processes > 1: self.assertNotIn('epoch 0:', A ) self.assertIn('epoch 1:', A ) else: self.assertIn('epoch 0:', A ) self.assertIn('epoch 1:', A ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split() with mock.patch.dict(os.environ, {'TESTING_MOCKED_DATALOADERS': '0'} ): SCREAMING_SNAKE_CASE : Union[str, Any] = run_command(self._launch_args + testargs, return_stdout=A ) SCREAMING_SNAKE_CASE : Optional[int] = re.findall('({.+})', A ) SCREAMING_SNAKE_CASE : Dict = [r for r in results if 'accuracy' in r][-1] SCREAMING_SNAKE_CASE : Optional[int] = ast.literal_eval(A ) self.assertGreaterEqual(results['accuracy'], 0.75 ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = ['examples/by_feature/multi_process_metrics.py'] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'} ) def UpperCamelCase_ ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: SCREAMING_SNAKE_CASE : Optional[Any] = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(A, 'tracking' ) ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = ['examples/by_feature/gradient_accumulation.py'] run_command(self._launch_args + testargs ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = ['examples/by_feature/local_sgd.py'] run_command(self._launch_args + testargs )
28
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class _a ( unittest.TestCase ): '''simple docstring''' def __init__( self, A, A=7, A=3, A=30, A=400, A=True, A=None, A=True, A=[0.5, 0.5, 0.5], A=[0.5, 0.5, 0.5], A=True, A=1 / 255, A=True, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333} SCREAMING_SNAKE_CASE : List[Any] = parent SCREAMING_SNAKE_CASE : Dict = batch_size SCREAMING_SNAKE_CASE : int = num_channels SCREAMING_SNAKE_CASE : Tuple = min_resolution SCREAMING_SNAKE_CASE : int = max_resolution SCREAMING_SNAKE_CASE : Tuple = do_resize SCREAMING_SNAKE_CASE : Tuple = size SCREAMING_SNAKE_CASE : Any = do_normalize SCREAMING_SNAKE_CASE : Optional[int] = image_mean SCREAMING_SNAKE_CASE : Union[str, Any] = image_std SCREAMING_SNAKE_CASE : Optional[int] = do_rescale SCREAMING_SNAKE_CASE : int = rescale_factor SCREAMING_SNAKE_CASE : List[str] = do_pad def UpperCamelCase_ ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCamelCase_ ( self, A, A=False ): '''simple docstring''' if not batched: SCREAMING_SNAKE_CASE : List[Any] = image_inputs[0] if isinstance(A, Image.Image ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = image.size else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = image.shape[1], image.shape[2] if w < h: SCREAMING_SNAKE_CASE : int = int(self.size['shortest_edge'] * h / w ) SCREAMING_SNAKE_CASE : int = self.size['shortest_edge'] elif w > h: SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge'] SCREAMING_SNAKE_CASE : Dict = int(self.size['shortest_edge'] * w / h ) else: SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge'] SCREAMING_SNAKE_CASE : int = self.size['shortest_edge'] else: SCREAMING_SNAKE_CASE : Union[str, Any] = [] for image in image_inputs: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) SCREAMING_SNAKE_CASE : Union[str, Any] = max(A, key=lambda A : item[0] )[0] SCREAMING_SNAKE_CASE : str = max(A, key=lambda A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : List[Any] = YolosImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = YolosImageProcessingTester(self ) @property def UpperCamelCase_ ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A, 'image_mean' ) ) self.assertTrue(hasattr(A, 'image_std' ) ) self.assertTrue(hasattr(A, 'do_normalize' ) ) self.assertTrue(hasattr(A, 'do_resize' ) ) 
self.assertTrue(hasattr(A, 'size' ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1_333} ) self.assertEqual(image_processor.do_pad, A ) SCREAMING_SNAKE_CASE : str = self.image_processing_class.from_dict( self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=A ) self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad, A ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A, Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.get_expected_values(A, batched=A ) SCREAMING_SNAKE_CASE : Tuple = image_processing(A, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, numpify=A ) for image in image_inputs: self.assertIsInstance(A, np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(A, return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor_tester.get_expected_values(A, batched=A ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A ) for image in image_inputs: self.assertIsInstance(A, torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, 
expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = image_processing(A, return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(A, batched=A ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(do_resize=A, do_normalize=A, do_rescale=A ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A ) for image in image_inputs: self.assertIsInstance(A, torch.Tensor ) # Test whether the method "pad" and calling the image processor return the same tensors SCREAMING_SNAKE_CASE : List[str] = image_processing_a.pad(A, return_tensors='pt' ) SCREAMING_SNAKE_CASE : Dict = image_processing_a(A, return_tensors='pt' ) self.assertTrue( torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r' ) as f: SCREAMING_SNAKE_CASE : Dict = json.loads(f.read() ) SCREAMING_SNAKE_CASE : Any = {'image_id': 39_769, 'annotations': target} # encode them SCREAMING_SNAKE_CASE : Any = YolosImageProcessor.from_pretrained('hustvl/yolos-small' ) SCREAMING_SNAKE_CASE : int = image_processing(images=A, annotations=A, return_tensors='pt' ) # verify pixel values SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['pixel_values'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) ) # verify area SCREAMING_SNAKE_CASE : Tuple = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) ) # verify boxes SCREAMING_SNAKE_CASE : str = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) ) # verify image_id SCREAMING_SNAKE_CASE : Tuple = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) ) # verify is_crowd SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) ) # verify class_labels SCREAMING_SNAKE_CASE : int = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) ) # verify orig_size SCREAMING_SNAKE_CASE : Tuple = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) ) # verify size SCREAMING_SNAKE_CASE : str = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r' ) as f: SCREAMING_SNAKE_CASE : int = json.loads(f.read() ) SCREAMING_SNAKE_CASE : List[Any] = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target} SCREAMING_SNAKE_CASE : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them SCREAMING_SNAKE_CASE : int = YolosImageProcessor(format='coco_panoptic' ) SCREAMING_SNAKE_CASE : str = image_processing(images=A, annotations=A, masks_path=A, return_tensors='pt' ) # verify pixel values SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['pixel_values'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) ) # verify area SCREAMING_SNAKE_CASE : Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) ) # verify boxes SCREAMING_SNAKE_CASE : Optional[int] = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape, A ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) ) # verify image_id SCREAMING_SNAKE_CASE : List[str] = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) ) # verify is_crowd SCREAMING_SNAKE_CASE : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) ) # verify class_labels SCREAMING_SNAKE_CASE : Any = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) ) # verify masks SCREAMING_SNAKE_CASE : Optional[int] = 822_873 self.assertEqual(encoding['labels'][0]['masks'].sum().item(), A ) # verify orig_size SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) ) # verify size SCREAMING_SNAKE_CASE : Tuple = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
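The tester's `get_expected_values` encodes the shortest-edge resize rule used by DETR-style processors: scale the shorter side of the image to `shortest_edge` while preserving aspect ratio. A pure-Python sketch of that rule (the function name is illustrative, and the `longest_edge` cap of 1333 is omitted for brevity):

```python
def expected_size(height: int, width: int, shortest_edge: int = 18) -> tuple[int, int]:
    """Return (height, width) after scaling the shorter side to `shortest_edge`."""
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

print(expected_size(400, 200))  # (36, 18): width is the shorter side, so it becomes 18
```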
28
1
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCamelCase_ = logging.get_logger(__name__)

UpperCamelCase_ = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class _a ( SCREAMING_SNAKE_CASE ):
    '''simple docstring'''

    A : List[str] = '''luke'''

    def __init__( self, A=50_267, A=500_000, A=768, A=256, A=12, A=12, A=3_072, A="gelu", A=0.1, A=0.1, A=512, A=2, A=0.02, A=1E-12, A=True, A=None, A=1, A=0, A=2, **A, ):
        '''simple docstring'''
        super().__init__(pad_token_id=A, bos_token_id=A, eos_token_id=A, **A )

        SCREAMING_SNAKE_CASE : Tuple = vocab_size
        SCREAMING_SNAKE_CASE : Optional[int] = entity_vocab_size
        SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
        SCREAMING_SNAKE_CASE : List[str] = entity_emb_size
        SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
        SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
        SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
        SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
        SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
        SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
        SCREAMING_SNAKE_CASE : Optional[int] = type_vocab_size
        SCREAMING_SNAKE_CASE : Dict = initializer_range
        SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps
        SCREAMING_SNAKE_CASE : List[Any] = use_entity_aware_attention
        SCREAMING_SNAKE_CASE : Tuple = classifier_dropout
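A hedged usage sketch for the configuration above, assuming the `transformers` package is installed, where this class is exposed under its public name `LukeConfig`:

```python
from transformers import LukeConfig

# Build a config, then round-trip it through its dict form.
config = LukeConfig(hidden_size=768, entity_emb_size=256, num_hidden_layers=12)
assert config.model_type == "luke"

restored = LukeConfig.from_dict(config.to_dict())
assert restored.entity_emb_size == 256
```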
28
'''simple docstring'''
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal


UpperCamelCase_ = logging.get_logger(__name__)

UpperCamelCase_ = TypeVar("DatasetType", Dataset, IterableDataset)


def lowercase__( __UpperCamelCase: List[DatasetType] ,__UpperCamelCase: Optional[List[float]] = None ,__UpperCamelCase: Optional[int] = None ,__UpperCamelCase: Optional[DatasetInfo] = None ,__UpperCamelCase: Optional[NamedSplit] = None ,__UpperCamelCase: Literal["first_exhausted", "all_exhausted"] = "first_exhausted" ,):
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.' )
    for i, dataset in enumerate(__UpperCamelCase ):
        if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ):
            if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']" )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}." )
        if i == 0:
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = (
                (Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset)
            )
        elif not isinstance(__UpperCamelCase ,__UpperCamelCase ):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase )
    else:
        return _interleave_iterable_datasets(
            __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase )


def lowercase__( __UpperCamelCase: List[DatasetType] ,__UpperCamelCase: Optional[DatasetInfo] = None ,__UpperCamelCase: Optional[NamedSplit] = None ,__UpperCamelCase: int = 0 ,):
    """simple docstring"""
    if not dsets:
        raise ValueError('Unable to concatenate an empty list of datasets.' )
    for i, dataset in enumerate(__UpperCamelCase ):
        if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ):
            if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']" )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}." )
        if i == 0:
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = (
                (Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset)
            )
        elif not isinstance(__UpperCamelCase ,__UpperCamelCase ):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase )
    else:
        return _concatenate_iterable_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase )
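A minimal sketch of the two public entry points defined above, assuming the `datasets` library is installed (the toy contents are illustrative):

```python
from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"x": [0, 1, 2]})
d2 = Dataset.from_dict({"x": [10, 11]})

# Alternate one example from each dataset until the shortest runs out
# ("first_exhausted", the default stopping strategy).
mixed = interleave_datasets([d1, d2], stopping_strategy="first_exhausted")
print(mixed["x"])  # [0, 10, 1, 11]

# Stack rows end to end (axis=0 is the default; axis=1 would join columns).
combined = concatenate_datasets([d1, d2])
print(combined["x"])  # [0, 1, 2, 10, 11]
```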
28
1
'''simple docstring''' from ..utils import DummyObject, requires_backends class _a ( metaclass=SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[str] = ['''flax'''] def __init__( self, *A, **A ): '''simple docstring''' requires_backends(self, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) class _a ( metaclass=SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[str] = ['''flax'''] def __init__( self, *A, **A ): '''simple docstring''' requires_backends(self, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) class _a ( metaclass=SCREAMING_SNAKE_CASE ): '''simple docstring''' A : str = ['''flax'''] def __init__( self, *A, **A ): '''simple docstring''' requires_backends(self, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) class _a ( metaclass=SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Dict = ['''flax'''] def __init__( self, *A, **A ): '''simple docstring''' requires_backends(self, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) class _a ( metaclass=SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Union[str, Any] = ['''flax'''] def __init__( self, *A, **A ): '''simple docstring''' requires_backends(self, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) class _a ( metaclass=SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Any = ['''flax'''] def __init__( self, *A, **A ): '''simple docstring''' requires_backends(self, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) class _a ( metaclass=SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[str] = ['''flax'''] def __init__( self, *A, **A ): '''simple docstring''' requires_backends(self, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) class _a ( metaclass=SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Any = ['''flax'''] def __init__( self, *A, **A ): '''simple docstring''' requires_backends(self, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) class _a ( metaclass=SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[str] = ['''flax'''] def __init__( self, *A, **A ): '''simple docstring''' requires_backends(self, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' 
requires_backends(cls, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) class _a ( metaclass=SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Tuple = ['''flax'''] def __init__( self, *A, **A ): '''simple docstring''' requires_backends(self, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) class _a ( metaclass=SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Optional[int] = ['''flax'''] def __init__( self, *A, **A ): '''simple docstring''' requires_backends(self, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) class _a ( metaclass=SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Optional[int] = ['''flax'''] def __init__( self, *A, **A ): '''simple docstring''' requires_backends(self, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) class _a ( metaclass=SCREAMING_SNAKE_CASE ): '''simple docstring''' A : int = ['''flax'''] def __init__( self, *A, **A ): '''simple docstring''' requires_backends(self, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] ) @classmethod def UpperCamelCase_ ( cls, *A, **A ): '''simple docstring''' requires_backends(cls, ['flax'] )
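All of these placeholders follow one pattern: a `DummyObject` metaclass plus a `requires_backends` check, so the module can always be imported and only fails at the moment a class is actually used without the optional `flax` backend. A self-contained re-implementation of the idea (a simplified sketch, not the library's exact code):

```python
class DummyObject(type):
    """Metaclass that turns a class into an import-time-safe placeholder."""

    def __getattr__(cls, name):
        # Triggered for any attribute the placeholder class does not define.
        raise ImportError(f"{cls.__name__} requires the backends {cls._backends}")


class FlaxThing(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        # Instantiation fails with the same helpful message.
        raise ImportError(f"{type(self).__name__} requires the backends {self._backends}")


try:
    FlaxThing()
except ImportError as e:
    print(e)  # FlaxThing requires the backends ['flax']
```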
28
'''simple docstring''' import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A, 'hidden_sizes' ) ) self.parent.assertTrue(hasattr(A, 'neck_hidden_sizes' ) ) self.parent.assertTrue(hasattr(A, 'num_attention_heads' ) ) class _a : '''simple docstring''' def __init__( self, A, A=13, A=32, A=2, A=3, A=640, A=4, A="silu", A=3, A=32, A=0.1, A=0.1, A=0.1, A=0.02, A=True, A=True, A=10, A=None, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : int = image_size SCREAMING_SNAKE_CASE : str = patch_size SCREAMING_SNAKE_CASE : Tuple = num_channels SCREAMING_SNAKE_CASE : int = last_hidden_size SCREAMING_SNAKE_CASE : Any = num_attention_heads SCREAMING_SNAKE_CASE : List[Any] = hidden_act SCREAMING_SNAKE_CASE : Optional[int] = conv_kernel_size SCREAMING_SNAKE_CASE : Optional[Any] = output_stride SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = classifier_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = use_labels SCREAMING_SNAKE_CASE : int = is_training SCREAMING_SNAKE_CASE : Dict = num_labels SCREAMING_SNAKE_CASE : Dict = initializer_range SCREAMING_SNAKE_CASE : Optional[int] = scope def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Dict = None if self.use_labels: SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size], self.num_labels ) SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) SCREAMING_SNAKE_CASE : int = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self ): '''simple docstring''' return MobileViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = MobileViTModel(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Optional[int] = model(A ) self.parent.assertEqual( result.last_hidden_state.shape, ( 
self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.num_labels SCREAMING_SNAKE_CASE : Tuple = MobileViTForImageClassification(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : List[str] = model(A, labels=A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels SCREAMING_SNAKE_CASE : str = MobileViTForSemanticSegmentation(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : str = model(A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) SCREAMING_SNAKE_CASE : int = model(A, labels=A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs SCREAMING_SNAKE_CASE : str = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Tuple = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) A : List[Any] = ( { '''feature-extraction''': MobileViTModel, '''image-classification''': MobileViTForImageClassification, '''image-segmentation''': MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) A : Optional[int] = False A : Dict = False A : List[Any] = False A : Optional[int] = False def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = MobileViTModelTester(self ) SCREAMING_SNAKE_CASE : str = MobileViTConfigTester(self, config_class=A, has_text_modality=A ) def UpperCamelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='MobileViT does not use inputs_embeds' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViT does not support input and output embeddings' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViT does not output attentions' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A ) SCREAMING_SNAKE_CASE : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE : Any = ['pixel_values'] self.assertListEqual(arg_names[:1], A ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' def check_hidden_states_output(A, A, A ): SCREAMING_SNAKE_CASE : Any = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(A, A ) ) SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states SCREAMING_SNAKE_CASE : List[str] = 5 self.assertEqual(len(A ), A ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. SCREAMING_SNAKE_CASE : int = 2 for i in range(len(A ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], ) divisor *= 2 self.assertEqual(self.model_tester.output_stride, divisor // 2 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Tuple = True check_hidden_states_output(A, A, A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE : Optional[Any] = True check_hidden_states_output(A, A, A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*A ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : int = MobileViTModel.from_pretrained(A ) self.assertIsNotNone(A ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class _a ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): '''simple docstring''' return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(A ) SCREAMING_SNAKE_CASE : Any = self.default_image_processor SCREAMING_SNAKE_CASE : Dict = prepare_img() SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Tuple = model(**A ) # verify the logits SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape, A ) SCREAMING_SNAKE_CASE : int = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(A ) self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : Optional[Any] = model.to(A ) SCREAMING_SNAKE_CASE : Optional[int] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) 
SCREAMING_SNAKE_CASE : str = prepare_img() SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Dict = model(**A ) SCREAMING_SNAKE_CASE : List[str] = outputs.logits # verify the logits SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape, A ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor( [ [[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]], [[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]], [[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]], ], device=A, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], A, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : List[str] = model.to(A ) SCREAMING_SNAKE_CASE : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img() SCREAMING_SNAKE_CASE : Any = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Optional[Any] = model(**A ) SCREAMING_SNAKE_CASE : int = outputs.logits.detach().cpu() SCREAMING_SNAKE_CASE : Dict = image_processor.post_process_semantic_segmentation(outputs=A, target_sizes=[(50, 60)] ) SCREAMING_SNAKE_CASE : Dict = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape, A ) SCREAMING_SNAKE_CASE : Tuple = image_processor.post_process_semantic_segmentation(outputs=A ) SCREAMING_SNAKE_CASE : Any = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape, A )
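The hidden-states test above encodes MobileViT's geometry: five stages, each halving the spatial resolution, ending at `image_size // output_stride`. The same bookkeeping in a few lines of plain Python, using the tester's default values:

```python
image_size, num_stages, output_stride = 32, 5, 32

divisor = 2
for stage in range(num_stages):
    h = w = image_size // divisor  # feature-map side length at this stage
    print(f"stage {stage}: {h}x{w}")  # 16x16, 8x8, 4x4, 2x2, 1x1
    divisor *= 2

assert output_stride == divisor // 2  # 32, matching the tester's final check
```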
28
1
'''simple docstring'''
import os
from collections import deque

import torch
from torch.utils.data import Dataset


class _a ( SCREAMING_SNAKE_CASE ):
    '''simple docstring'''

    def __init__( self, A="", A="train" ):
        '''simple docstring'''
        assert os.path.isdir(A )

        SCREAMING_SNAKE_CASE : Optional[int] = []
        SCREAMING_SNAKE_CASE : str = os.listdir(A )
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            SCREAMING_SNAKE_CASE : Tuple = os.path.join(A, A )
            if not os.path.isfile(A ):
                continue
            self.documents.append(A )

    def __len__( self ):
        '''simple docstring'''
        return len(self.documents )

    def __getitem__( self, A ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Dict = self.documents[idx]
        SCREAMING_SNAKE_CASE : List[str] = document_path.split('/' )[-1]
        with open(A, encoding='utf-8' ) as source:
            SCREAMING_SNAKE_CASE : Union[str, Any] = source.read()
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = process_story(A )
        return document_name, story_lines, summary_lines


def lowercase__( __UpperCamelCase: Optional[Any] ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Any = list(filter(lambda __UpperCamelCase : len(__UpperCamelCase ) != 0 ,[line.strip() for line in raw_story.split('\n' )] ) )

    # for some unknown reason some lines miss a period, add it
    SCREAMING_SNAKE_CASE : List[str] = [_add_missing_period(__UpperCamelCase ) for line in nonempty_lines]

    # gather article lines
    SCREAMING_SNAKE_CASE : Tuple = []
    SCREAMING_SNAKE_CASE : int = deque(__UpperCamelCase )
    while True:
        try:
            SCREAMING_SNAKE_CASE : Optional[Any] = lines.popleft()
            if element.startswith('@highlight' ):
                break
            story_lines.append(__UpperCamelCase )
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    SCREAMING_SNAKE_CASE : int = list(filter(lambda __UpperCamelCase : not t.startswith('@highlight' ) ,__UpperCamelCase ) )

    return story_lines, summary_lines


def lowercase__( __UpperCamelCase: Dict ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Any = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u2019', ')']
    if line.startswith('@highlight' ):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: Any ,__UpperCamelCase: Optional[Any] ):
    """simple docstring"""
    if len(__UpperCamelCase ) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(__UpperCamelCase )) )
        return sequence


def lowercase__( __UpperCamelCase: Optional[int] ,__UpperCamelCase: Dict ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : List[Any] = torch.ones_like(__UpperCamelCase )
    SCREAMING_SNAKE_CASE : Union[str, Any] = sequence == pad_token_id
    SCREAMING_SNAKE_CASE : List[Any] = 0
    return mask


def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: Any ,__UpperCamelCase: Union[str, Any] ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Dict = [tokenizer.encode(__UpperCamelCase ) for line in story_lines]
    SCREAMING_SNAKE_CASE : Optional[int] = [token for sentence in story_lines_token_ids for token in sentence]
    SCREAMING_SNAKE_CASE : str = [tokenizer.encode(__UpperCamelCase ) for line in summary_lines]
    SCREAMING_SNAKE_CASE : Dict = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: Union[str, Any] ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Union[str, Any] = []
    for sequence in batch:
        SCREAMING_SNAKE_CASE : Dict = -1
        SCREAMING_SNAKE_CASE : List[Any] = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2 )
        batch_embeddings.append(__UpperCamelCase )
    return torch.tensor(__UpperCamelCase )
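The CNN/DailyMail `.story` format puts article sentences first, followed by summary sentences each preceded by an `@highlight` marker, which is what `process_story` above unpacks. A toy, self-contained version of the split (the function name is illustrative, and the period-fixing step is omitted):

```python
from collections import deque

def split_story(raw: str):
    """Split a .story file into (article_lines, summary_lines)."""
    lines = [l.strip() for l in raw.split("\n") if l.strip()]
    queue, article = deque(lines), []
    # Everything before the first "@highlight" belongs to the article.
    while queue and not queue[0].startswith("@highlight"):
        article.append(queue.popleft())
    # What remains is summary text, minus the marker lines themselves.
    summary = [l for l in queue if not l.startswith("@highlight")]
    return article, summary

story = "First sentence.\nSecond sentence.\n@highlight\nthe summary line"
print(split_story(story))
# (['First sentence.', 'Second sentence.'], ['the summary line'])
```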
28
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase_ = { "vocab_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt", "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-german-cased": ( "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json" ), "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json" ), }, } UpperCamelCase_ = { "distilbert-base-uncased": 5_1_2, "distilbert-base-uncased-distilled-squad": 5_1_2, "distilbert-base-cased": 5_1_2, "distilbert-base-cased-distilled-squad": 5_1_2, "distilbert-base-german-cased": 5_1_2, "distilbert-base-multilingual-cased": 5_1_2, } UpperCamelCase_ = { "distilbert-base-uncased": {"do_lower_case": True}, "distilbert-base-uncased-distilled-squad": {"do_lower_case": True}, "distilbert-base-cased": {"do_lower_case": False}, "distilbert-base-cased-distilled-squad": {"do_lower_case": False}, "distilbert-base-german-cased": {"do_lower_case": False}, "distilbert-base-multilingual-cased": {"do_lower_case": False}, } class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[Any] = VOCAB_FILES_NAMES A : Dict = PRETRAINED_VOCAB_FILES_MAP A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : Optional[Any] = PRETRAINED_INIT_CONFIGURATION A : Optional[int] = ['''input_ids''', '''attention_mask'''] A : List[Any] = DistilBertTokenizer def __init__( self, A=None, A=None, A=True, A="[UNK]", A="[SEP]", A="[PAD]", A="[CLS]", A="[MASK]", A=True, A=None, **A, ): '''simple docstring''' super().__init__( A, tokenizer_file=A, do_lower_case=A, unk_token=A, sep_token=A, pad_token=A, cls_token=A, mask_token=A, tokenize_chinese_chars=A, strip_accents=A, **A, ) SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase', A ) != do_lower_case or normalizer_state.get('strip_accents', A ) != strip_accents or normalizer_state.get('handle_chinese_chars', A ) != tokenize_chinese_chars ): SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(A, 
normalizer_state.pop('type' ) ) SCREAMING_SNAKE_CASE : Optional[Any] = do_lower_case SCREAMING_SNAKE_CASE : List[str] = strip_accents SCREAMING_SNAKE_CASE : List[str] = tokenize_chinese_chars SCREAMING_SNAKE_CASE : Dict = normalizer_class(**A ) SCREAMING_SNAKE_CASE : Union[str, Any] = do_lower_case def UpperCamelCase_ ( self, A, A=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id] SCREAMING_SNAKE_CASE : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self._tokenizer.model.save(A, name=A ) return tuple(A )
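The last two methods above implement the standard BERT-style packing of one or two sequences into `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`, with segment ids 0 for the first block and 1 for the second. A sketch of that layout with made-up special-token ids:

```python
CLS, SEP = 101, 102  # illustrative ids, not tied to any real vocabulary

def with_special_tokens(ids_a, ids_b=None):
    out = [CLS] + ids_a + [SEP]
    if ids_b:
        out += ids_b + [SEP]
    return out

def token_type_ids(ids_a, ids_b=None):
    first = len([CLS] + ids_a + [SEP]) * [0]
    return first if ids_b is None else first + len(ids_b + [SEP]) * [1]

print(with_special_tokens([7, 8], [9]))  # [101, 7, 8, 102, 9, 102]
print(token_type_ids([7, 8], [9]))       # [0, 0, 0, 0, 1, 1]
```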
28
1
'''simple docstring''' import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Optional[int] = XLMProphetNetTokenizer A : List[str] = False A : Union[str, Any] = True def UpperCamelCase_ ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE : List[str] = XLMProphetNetTokenizer(A, keep_accents=A ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = '[PAD]' SCREAMING_SNAKE_CASE : Any = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ), A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ), A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0], '[PAD]' ) self.assertEqual(vocab_keys[1], '[CLS]' ) self.assertEqual(vocab_keys[-1], 'j' ) self.assertEqual(len(A ), 1_012 ) def UpperCamelCase_ ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size, 1_012 ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = XLMProphetNetTokenizer(A, keep_accents=A ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.tokenize('This is a test' ) self.assertListEqual(A, ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( A, [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ], ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual( A, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ], ) SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A, [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '[UNK]', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '[UNK]', '.', ], ) @cached_property def UpperCamelCase_ ( self ): '''simple docstring''' return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = 'Hello World!' 
SCREAMING_SNAKE_CASE : int = [35_389, 6_672, 49, 2] self.assertListEqual(A, self.big_tokenizer.encode(A ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = {'input_ids': [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A, model_name='microsoft/xprophetnet-large-wiki100-cased', revision='1acad1643ddd54a44df6a1b797ada8373685d90e', )
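The expected id lists asserted above come from raw SentencePiece piece ids shifted by the tokenizer's `fairseq_offset`, which reserves the lowest ids for special tokens such as `[PAD]` and `[CLS]`. The arithmetic in isolation (the offset value here is illustrative, not the model's real one):

```python
fairseq_offset = 10  # illustrative; low ids 0..9 would be reserved for special tokens

piece_ids = [285, 46, 10, 170, 382]  # raw SentencePiece ids for "This is a test"
vocab_ids = [p + fairseq_offset for p in piece_ids]
print(vocab_ids)  # [295, 56, 20, 180, 392]
```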
28
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)


@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
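For reference, a minimal push/reload round trip along the lines of the tests above. The repo name and username are hypothetical, and a valid Hub login (e.g. via `huggingface-cli login`) is assumed.

from transformers import ViTImageProcessor

image_processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
image_processor.push_to_hub("my-test-image-processor")  # hypothetical repo name
reloaded = ViTImageProcessor.from_pretrained("<your-username>/my-test-image-processor")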
28
1
'''simple docstring'''
import argparse

import torch

from transformers import BertForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
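The layer-selection scheme above copies six teacher layers into a six-layer student; a small illustration of that index mapping (the labels here are purely illustrative):

teacher_layers = [0, 2, 4, 7, 9, 11]
mapping = {f"teacher layer {t}": f"student layer {s}" for s, t in enumerate(teacher_layers)}
print(mapping)  # teacher layer 0 -> student layer 0, ..., teacher layer 11 -> student layer 5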
28
'''simple docstring'''
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
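A quick usage note for the tree sort above: because `insert` only recurses when the new value is strictly smaller or strictly larger, duplicate values are silently dropped, so the output is the sorted sequence of distinct inputs.

print(tree_sort([5, 2, 8, 2, 1]))  # [1, 2, 5, 8] -- the duplicate 2 is dropped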
28
1
'''simple docstring'''
from __future__ import annotations

from typing import Any


class CircularQueueLinkedList:
    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            current_node.prev = previous_node
            previous_node.next = current_node
            previous_node = current_node
        # close the ring
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception('Empty Queue')

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception('Full Queue')


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
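A short usage sketch of the circular queue above, with capacity 2 for brevity:

q = CircularQueueLinkedList(initial_capacity=2)
q.enqueue("a")
q.enqueue("b")
print(q.dequeue())  # "a"
q.enqueue("c")      # reuses the freed node; a third enqueue before any dequeue would raise "Full Queue"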
28
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
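A hypothetical call sketch for the `deprecate` helper above, as it is typically used inside diffusers to retire a keyword argument (the argument name and cutoff version are made up; the relative `from .. import __version__` means this only runs inside the package):

def set_scale(**kwargs):
    # pops `scale` from kwargs if present and emits a FutureWarning
    scale = deprecate("scale", "99.0.0", "Use `guidance_scale` instead.", take_from=kwargs)
    return scale

set_scale(scale=7.5)  # returns 7.5 and warns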
28
1
'''simple docstring'''
import argparse
from collections import defaultdict


def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, 'r') as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, 'w') as f:
        for line in new_lines:
            f.write(line)


def main(correct_filename, fail=None):
    if fail is not None:
        with open(fail, 'r') as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct_filename, 'r') as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(';')
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
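The `--correct_filename` input is parsed with `line.split(';')`, so each record is one semicolon-separated line of the form `file;class_name;test_name;correct_line`. A hypothetical example record:

# tests/models/vit/test_modeling_vit.py;ViTModelTest;test_inference;        expected_slice = torch.tensor([0.1, 0.2, 0.3])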
28
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
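With `_LazyModule`, the submodules registered above are only imported on first attribute access; a usage sketch assuming a transformers install:

from transformers import RoFormerConfig  # config import works without any backend

config = RoFormerConfig()

# The model import is resolved lazily on first access and only succeeds when torch is installed:
from transformers import RoFormerModel

model = RoFormerModel(config)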
28
1
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ... import AutoBackbone from ...modeling_outputs import SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...utils.backbone_utils import BackboneMixin from .configuration_upernet import UperNetConfig UpperCamelCase_ = [ "openmmlab/upernet-convnext-tiny", # See all UperNet models at https://huggingface.co/models?filter=upernet ] # General docstring UpperCamelCase_ = "UperNetConfig" class _a ( nn.Module ): '''simple docstring''' def __init__( self, A, A, A, A = 0, A = False, A = 1, ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Optional[Any] = nn.Convad( in_channels=A, out_channels=A, kernel_size=A, padding=A, bias=A, dilation=A, ) SCREAMING_SNAKE_CASE : Union[str, Any] = nn.BatchNormad(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = nn.ReLU() def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.conv(A ) SCREAMING_SNAKE_CASE : List[str] = self.batch_norm(A ) SCREAMING_SNAKE_CASE : Tuple = self.activation(A ) return output class _a ( nn.Module ): '''simple docstring''' def __init__( self, A, A, A ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : str = [ nn.AdaptiveAvgPoolad(A ), UperNetConvModule(A, A, kernel_size=1 ), ] for i, layer in enumerate(self.layers ): self.add_module(str(A ), A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = input for layer in self.layers: SCREAMING_SNAKE_CASE : Dict = layer(A ) return hidden_state class _a ( nn.Module ): '''simple docstring''' def __init__( self, A, A, A, A ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Optional[Any] = pool_scales SCREAMING_SNAKE_CASE : List[str] = align_corners SCREAMING_SNAKE_CASE : List[Any] = in_channels SCREAMING_SNAKE_CASE : Optional[Any] = channels SCREAMING_SNAKE_CASE : int = [] for i, pool_scale in enumerate(A ): SCREAMING_SNAKE_CASE : str = UperNetPyramidPoolingBlock(pool_scale=A, in_channels=A, channels=A ) self.blocks.append(A ) self.add_module(str(A ), A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [] for ppm in self.blocks: SCREAMING_SNAKE_CASE : Tuple = ppm(A ) SCREAMING_SNAKE_CASE : str = nn.functional.interpolate( A, size=x.size()[2:], mode='bilinear', align_corners=self.align_corners ) ppm_outs.append(A ) return ppm_outs class _a ( nn.Module ): '''simple docstring''' def __init__( self, A, A ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : List[Any] = config SCREAMING_SNAKE_CASE : Any = config.pool_scales # e.g. 
(1, 2, 3, 6) SCREAMING_SNAKE_CASE : List[str] = in_channels SCREAMING_SNAKE_CASE : int = config.hidden_size SCREAMING_SNAKE_CASE : Optional[Any] = False SCREAMING_SNAKE_CASE : Tuple = nn.Convad(self.channels, config.num_labels, kernel_size=1 ) # PSP Module SCREAMING_SNAKE_CASE : Dict = UperNetPyramidPoolingModule( self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners, ) SCREAMING_SNAKE_CASE : int = UperNetConvModule( self.in_channels[-1] + len(self.pool_scales ) * self.channels, self.channels, kernel_size=3, padding=1, ) # FPN Module SCREAMING_SNAKE_CASE : Optional[Any] = nn.ModuleList() SCREAMING_SNAKE_CASE : List[Any] = nn.ModuleList() for in_channels in self.in_channels[:-1]: # skip the top layer SCREAMING_SNAKE_CASE : List[str] = UperNetConvModule(A, self.channels, kernel_size=1 ) SCREAMING_SNAKE_CASE : Optional[Any] = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1 ) self.lateral_convs.append(A ) self.fpn_convs.append(A ) SCREAMING_SNAKE_CASE : Dict = UperNetConvModule( len(self.in_channels ) * self.channels, self.channels, kernel_size=3, padding=1, ) def UpperCamelCase_ ( self ): '''simple docstring''' self.apply(self._init_weights ) def UpperCamelCase_ ( self, A ): '''simple docstring''' if isinstance(A, nn.Convad ): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = inputs[-1] SCREAMING_SNAKE_CASE : List[Any] = [x] psp_outs.extend(self.psp_modules(A ) ) SCREAMING_SNAKE_CASE : str = torch.cat(A, dim=1 ) SCREAMING_SNAKE_CASE : int = self.bottleneck(A ) return output def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )] laterals.append(self.psp_forward(A ) ) # build top-down path SCREAMING_SNAKE_CASE : Tuple = len(A ) for i in range(used_backbone_levels - 1, 0, -1 ): SCREAMING_SNAKE_CASE : Optional[int] = laterals[i - 1].shape[2:] SCREAMING_SNAKE_CASE : Dict = laterals[i - 1] + nn.functional.interpolate( laterals[i], size=A, mode='bilinear', align_corners=self.align_corners ) # build outputs SCREAMING_SNAKE_CASE : Optional[Any] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )] # append psp feature fpn_outs.append(laterals[-1] ) for i in range(used_backbone_levels - 1, 0, -1 ): SCREAMING_SNAKE_CASE : List[Any] = nn.functional.interpolate( fpn_outs[i], size=fpn_outs[0].shape[2:], mode='bilinear', align_corners=self.align_corners ) SCREAMING_SNAKE_CASE : Dict = torch.cat(A, dim=1 ) SCREAMING_SNAKE_CASE : Optional[int] = self.fpn_bottleneck(A ) SCREAMING_SNAKE_CASE : int = self.classifier(A ) return output class _a ( nn.Module ): '''simple docstring''' def __init__( self, A, A = 2, A = 3, A = 1 ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Tuple = config SCREAMING_SNAKE_CASE : Tuple = config.auxiliary_in_channels SCREAMING_SNAKE_CASE : int = config.auxiliary_channels SCREAMING_SNAKE_CASE : Optional[int] = config.auxiliary_num_convs SCREAMING_SNAKE_CASE : int = config.auxiliary_concat_input SCREAMING_SNAKE_CASE : Union[str, Any] = in_index SCREAMING_SNAKE_CASE : List[str] = (kernel_size // 2) * dilation SCREAMING_SNAKE_CASE : str = [] convs.append( UperNetConvModule( self.in_channels, self.channels, kernel_size=A, padding=A, dilation=A ) ) for i in range(self.num_convs - 1 ): 
convs.append( UperNetConvModule( self.channels, self.channels, kernel_size=A, padding=A, dilation=A ) ) if self.num_convs == 0: SCREAMING_SNAKE_CASE : List[str] = nn.Identity() else: SCREAMING_SNAKE_CASE : List[Any] = nn.Sequential(*A ) if self.concat_input: SCREAMING_SNAKE_CASE : List[Any] = UperNetConvModule( self.in_channels + self.channels, self.channels, kernel_size=A, padding=kernel_size // 2 ) SCREAMING_SNAKE_CASE : List[str] = nn.Convad(self.channels, config.num_labels, kernel_size=1 ) def UpperCamelCase_ ( self ): '''simple docstring''' self.apply(self._init_weights ) def UpperCamelCase_ ( self, A ): '''simple docstring''' if isinstance(A, nn.Convad ): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = encoder_hidden_states[self.in_index] SCREAMING_SNAKE_CASE : Any = self.convs(A ) if self.concat_input: SCREAMING_SNAKE_CASE : List[str] = self.conv_cat(torch.cat([hidden_states, output], dim=1 ) ) SCREAMING_SNAKE_CASE : Optional[int] = self.classifier(A ) return output class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[Any] = UperNetConfig A : Optional[Any] = '''pixel_values''' A : Optional[int] = True def UpperCamelCase_ ( self, A ): '''simple docstring''' if isinstance(A, A ): module.backbone.init_weights() module.decode_head.init_weights() module.auxiliary_head.init_weights() def UpperCamelCase_ ( self ): '''simple docstring''' self.backbone.init_weights() self.decode_head.init_weights() self.auxiliary_head.init_weights() def UpperCamelCase_ ( self, A, A=False ): '''simple docstring''' if isinstance(A, A ): SCREAMING_SNAKE_CASE : Tuple = value UpperCamelCase_ = R"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" UpperCamelCase_ = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( '''UperNet framework leveraging any vision backbone e.g. 
for ADE20k, CityScapes.''' , SCREAMING_SNAKE_CASE , ) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self, A ): '''simple docstring''' super().__init__(A ) SCREAMING_SNAKE_CASE : Optional[int] = AutoBackbone.from_config(config.backbone_config ) # Semantic segmentation head(s) SCREAMING_SNAKE_CASE : Any = UperNetHead(A, in_channels=self.backbone.channels ) SCREAMING_SNAKE_CASE : Optional[int] = UperNetFCNHead(A ) if config.use_auxiliary_head else None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length' ) ) @replace_return_docstrings(output_type=A, config_class=_CONFIG_FOR_DOC ) def UpperCamelCase_ ( self, A = None, A = None, A = None, A = None, A = None, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = return_dict if return_dict is not None else self.config.use_return_dict SCREAMING_SNAKE_CASE : Tuple = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) SCREAMING_SNAKE_CASE : int = output_attentions if output_attentions is not None else self.config.output_attentions SCREAMING_SNAKE_CASE : Optional[Any] = self.backbone.forward_with_filtered_kwargs( A, output_hidden_states=A, output_attentions=A ) SCREAMING_SNAKE_CASE : Dict = outputs.feature_maps SCREAMING_SNAKE_CASE : List[str] = self.decode_head(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = nn.functional.interpolate(A, size=pixel_values.shape[2:], mode='bilinear', align_corners=A ) SCREAMING_SNAKE_CASE : Optional[int] = None if self.auxiliary_head is not None: SCREAMING_SNAKE_CASE : str = self.auxiliary_head(A ) SCREAMING_SNAKE_CASE : Optional[Any] = nn.functional.interpolate( A, size=pixel_values.shape[2:], mode='bilinear', align_corners=A ) SCREAMING_SNAKE_CASE : List[Any] = None if labels is not None: if self.config.num_labels == 1: raise ValueError('The number of labels should be greater than one' ) else: # compute weighted loss SCREAMING_SNAKE_CASE : Union[str, Any] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index ) SCREAMING_SNAKE_CASE : List[Any] = loss_fct(A, A ) SCREAMING_SNAKE_CASE : Union[str, Any] = loss_fct(A, A ) SCREAMING_SNAKE_CASE : List[Any] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: SCREAMING_SNAKE_CASE : Dict = (logits,) + outputs[1:] else: SCREAMING_SNAKE_CASE : Optional[int] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=A, logits=A, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
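An inference sketch for the model above, assuming the `openmmlab/upernet-convnext-tiny` checkpoint referenced at the top of the file and a hypothetical local image file:

import torch
from PIL import Image
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")

image = Image.open("scene.jpg")  # hypothetical local file
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
segmentation_map = outputs.logits.argmax(dim=1)  # (batch, height, width) class indices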
28
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
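A few sanity checks for the helper above; for non-negative integers it agrees with Python's built-in `int.bit_length()`:

assert get_highest_set_bit_position(0) == 0
assert get_highest_set_bit_position(8) == 4  # 0b1000: highest set bit is in position 4
assert get_highest_set_bit_position(25) == (25).bit_length()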
28
1
'''simple docstring''' import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _a : '''simple docstring''' @staticmethod def UpperCamelCase_ ( *A, **A ): '''simple docstring''' pass @is_pipeline_test @require_vision class _a ( unittest.TestCase ): '''simple docstring''' @require_torch def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = pipeline( model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification', ) SCREAMING_SNAKE_CASE : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) SCREAMING_SNAKE_CASE : Optional[int] = image_classifier(A, candidate_labels=['a', 'b', 'c'] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(A ), [ [{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'b'}, {'score': 0.3_33, 'label': 'c'}], [{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'c'}, {'score': 0.3_33, 'label': 'b'}], ], ) SCREAMING_SNAKE_CASE : Union[str, Any] = image_classifier([image] * 5, candidate_labels=['A', 'B', 'C'], batch_size=2 ) self.assertEqual( nested_simplify(A ), [ [ {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, ], [ {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, ], [ {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, ], [ {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, ], [ {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, ], ], ) @require_tf def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = pipeline( model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification', framework='tf' ) SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) SCREAMING_SNAKE_CASE : Optional[int] = image_classifier(A, candidate_labels=['a', 'b', 'c'] ) self.assertEqual( nested_simplify(A ), [{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'b'}, {'score': 0.3_33, 'label': 'c'}], ) SCREAMING_SNAKE_CASE : Any = image_classifier([image] * 5, candidate_labels=['A', 'B', 'C'], batch_size=2 ) self.assertEqual( nested_simplify(A ), [ [ {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, ], [ {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, ], [ {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, ], [ {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, ], [ {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, ], ], ) @slow @require_torch def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = pipeline( task='zero-shot-image-classification', model='openai/clip-vit-base-patch32', ) # This is an image of 
2 cats with remotes and no planes SCREAMING_SNAKE_CASE : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) SCREAMING_SNAKE_CASE : List[str] = image_classifier(A, candidate_labels=['cat', 'plane', 'remote'] ) self.assertEqual( nested_simplify(A ), [ {'score': 0.5_11, 'label': 'remote'}, {'score': 0.4_85, 'label': 'cat'}, {'score': 0.0_04, 'label': 'plane'}, ], ) SCREAMING_SNAKE_CASE : List[Any] = image_classifier([image] * 5, candidate_labels=['cat', 'plane', 'remote'], batch_size=2 ) self.assertEqual( nested_simplify(A ), [ [ {'score': 0.5_11, 'label': 'remote'}, {'score': 0.4_85, 'label': 'cat'}, {'score': 0.0_04, 'label': 'plane'}, ], ] * 5, ) @slow @require_tf def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = pipeline( task='zero-shot-image-classification', model='openai/clip-vit-base-patch32', framework='tf' ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) SCREAMING_SNAKE_CASE : List[Any] = image_classifier(A, candidate_labels=['cat', 'plane', 'remote'] ) self.assertEqual( nested_simplify(A ), [ {'score': 0.5_11, 'label': 'remote'}, {'score': 0.4_85, 'label': 'cat'}, {'score': 0.0_04, 'label': 'plane'}, ], ) SCREAMING_SNAKE_CASE : Tuple = image_classifier([image] * 5, candidate_labels=['cat', 'plane', 'remote'], batch_size=2 ) self.assertEqual( nested_simplify(A ), [ [ {'score': 0.5_11, 'label': 'remote'}, {'score': 0.4_85, 'label': 'cat'}, {'score': 0.0_04, 'label': 'plane'}, ], ] * 5, )
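The same pipeline outside the test harness; a minimal sketch using the CLIP checkpoint and fixture image from the slow tests above:

from PIL import Image
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
print(classifier(image, candidate_labels=["cat", "plane", "remote"]))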
28
'''simple docstring'''
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)'
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor (logits or last_hidden_state)
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
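Typical use of this pipeline goes through the `pipeline` factory; a short sketch (the model choice is illustrative):

from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("Transformers is great!")
print(len(features[0]), len(features[0][0]))  # sequence length, hidden size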
28
1
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json", "BridgeTower/bridgetower-base-itm-mlm": ( "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json" ), } class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Any = '''bridgetower_vision_model''' def __init__( self, A=768, A=12, A=3, A=16, A=288, A=1, A=1E-05, A=False, A=True, A=False, **A, ): '''simple docstring''' super().__init__(**A ) SCREAMING_SNAKE_CASE : List[str] = hidden_size SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers SCREAMING_SNAKE_CASE : Tuple = num_channels SCREAMING_SNAKE_CASE : str = patch_size SCREAMING_SNAKE_CASE : int = image_size SCREAMING_SNAKE_CASE : int = initializer_factor SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps SCREAMING_SNAKE_CASE : Any = stop_gradient SCREAMING_SNAKE_CASE : Optional[Any] = share_layernorm SCREAMING_SNAKE_CASE : Optional[int] = remove_last_layer @classmethod def UpperCamelCase_ ( cls, A, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = cls.get_config_dict(A, **A ) if config_dict.get('model_type' ) == "bridgetower": SCREAMING_SNAKE_CASE : Optional[int] = config_dict['text_config'] if "model_type" in config_dict and hasattr(cls, 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"You are using a model of type {config_dict['model_type']} to instantiate a model of type " F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(A, **A ) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : str = '''bridgetower_text_model''' def __init__( self, A=50_265, A=768, A=12, A=12, A=1, A=3_072, A="gelu", A=0.1, A=0.1, A=514, A=1, A=1E-05, A=1, A=0, A=2, A="absolute", A=True, **A, ): '''simple docstring''' super().__init__(**A ) SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size SCREAMING_SNAKE_CASE : List[Any] = hidden_size SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers SCREAMING_SNAKE_CASE : List[str] = num_attention_heads SCREAMING_SNAKE_CASE : Tuple = hidden_act SCREAMING_SNAKE_CASE : Tuple = initializer_factor SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Dict = max_position_embeddings SCREAMING_SNAKE_CASE : Optional[Any] = type_vocab_size SCREAMING_SNAKE_CASE : Any = layer_norm_eps SCREAMING_SNAKE_CASE : Tuple = position_embedding_type SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache SCREAMING_SNAKE_CASE : List[Any] = pad_token_id SCREAMING_SNAKE_CASE : List[str] = bos_token_id SCREAMING_SNAKE_CASE : List[str] = eos_token_id @classmethod def UpperCamelCase_ ( cls, A, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = cls.get_config_dict(A, **A ) if config_dict.get('model_type' ) == "bridgetower": SCREAMING_SNAKE_CASE : int = config_dict['text_config'] if "model_type" in config_dict and hasattr(cls, 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"You are using a model of type {config_dict['model_type']} to instantiate a model of type " F"{cls.model_type}. 
This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(A, **A ) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Union[str, Any] = '''bridgetower''' def __init__( self, A=True, A="gelu", A=768, A=1, A=1E-05, A=False, A="add", A=12, A=6, A=False, A=False, A=None, A=None, **A, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop('text_config_dict', A ) SCREAMING_SNAKE_CASE : List[str] = kwargs.pop('vision_config_dict', A ) super().__init__(**A ) SCREAMING_SNAKE_CASE : Union[str, Any] = share_cross_modal_transformer_layers SCREAMING_SNAKE_CASE : str = hidden_act SCREAMING_SNAKE_CASE : Any = hidden_size SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_factor SCREAMING_SNAKE_CASE : Any = layer_norm_eps SCREAMING_SNAKE_CASE : List[str] = share_link_tower_layers SCREAMING_SNAKE_CASE : int = link_tower_type SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers SCREAMING_SNAKE_CASE : Tuple = tie_word_embeddings SCREAMING_SNAKE_CASE : str = init_layernorm_from_vision_encoder if text_config is None: SCREAMING_SNAKE_CASE : int = {} logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.' ) if vision_config is None: SCREAMING_SNAKE_CASE : int = {} logger.info('`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.' ) SCREAMING_SNAKE_CASE : List[str] = BridgeTowerTextConfig(**A ) SCREAMING_SNAKE_CASE : Tuple = BridgeTowerVisionConfig(**A ) @classmethod def UpperCamelCase_ ( cls, A, A, **A ): '''simple docstring''' return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE : List[str] = self.text_config.to_dict() SCREAMING_SNAKE_CASE : Tuple = self.vision_config.to_dict() SCREAMING_SNAKE_CASE : List[Any] = self.__class__.model_type return output
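A composition sketch mirroring the combining classmethod above; in the released transformers API it is exposed as `BridgeTowerConfig.from_text_vision_configs`, with the class names that the blob itself references:

from transformers import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig

text_config = BridgeTowerTextConfig()
vision_config = BridgeTowerVisionConfig()
config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
print(sorted(config.to_dict().keys()))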
28
'''simple docstring'''
from __future__ import annotations

import queue


class TreeNode:
    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None


def build_tree():
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
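A non-interactive alternative to `build_tree()` for quick experiments, constructing a three-node tree by hand:

root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)
pre_order(root)  # prints 1,2,3,
print()
in_order(root)   # prints 2,1,3,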
28
1
'''simple docstring''' import itertools import random import unittest import numpy as np from transformers import BatchFeature, SpeechTaFeatureExtractor from transformers.testing_utils import require_torch from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch UpperCamelCase_ = random.Random() def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: Tuple=1.0 ,__UpperCamelCase: int=None ,__UpperCamelCase: str=None ): """simple docstring""" if rng is None: SCREAMING_SNAKE_CASE : Union[str, Any] = global_rng SCREAMING_SNAKE_CASE : int = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch class _a ( unittest.TestCase ): '''simple docstring''' def __init__( self, A, A=7, A=400, A=2_000, A=1, A=0.0, A=16_000, A=True, A=80, A=16, A=64, A="hann_window", A=80, A=7_600, A=1E-10, A=True, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = parent SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size SCREAMING_SNAKE_CASE : Union[str, Any] = min_seq_length SCREAMING_SNAKE_CASE : List[str] = max_seq_length SCREAMING_SNAKE_CASE : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE : Union[str, Any] = feature_size SCREAMING_SNAKE_CASE : Optional[Any] = padding_value SCREAMING_SNAKE_CASE : Optional[int] = sampling_rate SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize SCREAMING_SNAKE_CASE : Tuple = num_mel_bins SCREAMING_SNAKE_CASE : Dict = hop_length SCREAMING_SNAKE_CASE : Optional[Any] = win_length SCREAMING_SNAKE_CASE : Optional[Any] = win_function SCREAMING_SNAKE_CASE : Optional[int] = fmin SCREAMING_SNAKE_CASE : List[Any] = fmax SCREAMING_SNAKE_CASE : str = mel_floor SCREAMING_SNAKE_CASE : Any = return_attention_mask def UpperCamelCase_ ( self ): '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "do_normalize": self.do_normalize, "num_mel_bins": self.num_mel_bins, "hop_length": self.hop_length, "win_length": self.win_length, "win_function": self.win_function, "fmin": self.fmin, "fmax": self.fmax, "mel_floor": self.mel_floor, "return_attention_mask": self.return_attention_mask, } def UpperCamelCase_ ( self, A=False, A=False ): '''simple docstring''' def _flatten(A ): return list(itertools.chain(*A ) ) if equal_length: SCREAMING_SNAKE_CASE : Tuple = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE : str = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(A ) for x in speech_inputs] return speech_inputs def UpperCamelCase_ ( self, A=False, A=False ): '''simple docstring''' if equal_length: SCREAMING_SNAKE_CASE : int = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE : int = [ floats_list((x, self.num_mel_bins) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE : Tuple = [np.asarray(A ) for x in speech_inputs] return speech_inputs @require_torch class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : 
List[Any] = SpeechTaFeatureExtractor def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = SpeechTaFeatureExtractionTester(self ) def UpperCamelCase_ ( self, A ): '''simple docstring''' self.assertTrue(np.all(np.mean(A, axis=0 ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(A, axis=0 ) - 1 ) < 1E-3 ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE : Dict = [floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] SCREAMING_SNAKE_CASE : List[Any] = [np.asarray(A ) for speech_input in speech_inputs] # Test not batched input SCREAMING_SNAKE_CASE : Dict = feat_extract(speech_inputs[0], return_tensors='np' ).input_values SCREAMING_SNAKE_CASE : Tuple = feat_extract(np_speech_inputs[0], return_tensors='np' ).input_values self.assertTrue(np.allclose(A, A, atol=1E-3 ) ) # Test batched SCREAMING_SNAKE_CASE : List[Any] = feat_extract(A, return_tensors='np' ).input_values SCREAMING_SNAKE_CASE : Tuple = feat_extract(A, return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(A, A ): self.assertTrue(np.allclose(A, A, atol=1E-3 ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE : List[str] = [floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] SCREAMING_SNAKE_CASE : Dict = ['longest', 'max_length', 'do_not_pad'] SCREAMING_SNAKE_CASE : Dict = [None, 1_600, None] for max_length, padding in zip(A, A ): SCREAMING_SNAKE_CASE : Dict = feat_extract(A, padding=A, max_length=A, return_tensors='np' ) SCREAMING_SNAKE_CASE : Dict = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self.assertTrue(input_values[0][800:].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_values[1][:1_000] ) self.assertTrue(input_values[0][1_000:].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_values[2][:1_200] ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE : List[str] = range(800, 1_400, 200 ) SCREAMING_SNAKE_CASE : List[Any] = [floats_list((1, x) )[0] for x in lengths] SCREAMING_SNAKE_CASE : str = ['longest', 'max_length', 'do_not_pad'] SCREAMING_SNAKE_CASE : int = [None, 1_600, None] for max_length, padding in zip(A, A ): SCREAMING_SNAKE_CASE : List[str] = feat_extract(A, max_length=A, padding=A ) SCREAMING_SNAKE_CASE : Optional[int] = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self._check_zero_mean_unit_variance(input_values[1][:1_000] ) self._check_zero_mean_unit_variance(input_values[2][:1_200] ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE : List[str] = [floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] SCREAMING_SNAKE_CASE : Dict = feat_extract( A, truncation=A, max_length=1_000, padding='max_length', return_tensors='np' ) SCREAMING_SNAKE_CASE : Tuple = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def 
UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] SCREAMING_SNAKE_CASE : int = feat_extract( A, truncation=A, max_length=1_000, padding='longest', return_tensors='np' ) SCREAMING_SNAKE_CASE : Tuple = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1_000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1_000) ) SCREAMING_SNAKE_CASE : Tuple = [floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] SCREAMING_SNAKE_CASE : Tuple = feat_extract( A, truncation=A, max_length=2_000, padding='longest', return_tensors='np' ) SCREAMING_SNAKE_CASE : Optional[Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1_000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1_200) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE : Optional[Any] = np.random.rand(100 ).astype(np.floataa ) SCREAMING_SNAKE_CASE : Optional[Any] = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor.pad([{'input_values': inputs}], return_tensors='np' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) SCREAMING_SNAKE_CASE : List[str] = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE : List[Any] = [floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] SCREAMING_SNAKE_CASE : int = [np.asarray(A ) for speech_input in speech_inputs] # Test feature size SCREAMING_SNAKE_CASE : Any = feature_extractor(audio_target=A, padding=A, return_tensors='np' ).input_values self.assertTrue(input_values.ndim == 3 ) self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins ) # Test not batched input SCREAMING_SNAKE_CASE : str = feature_extractor(speech_inputs[0], return_tensors='np' ).input_values SCREAMING_SNAKE_CASE : Tuple = feature_extractor(np_speech_inputs[0], return_tensors='np' ).input_values self.assertTrue(np.allclose(A, A, atol=1E-3 ) ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor(A, return_tensors='np' ).input_values SCREAMING_SNAKE_CASE : int = feature_extractor(A, return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(A, A ): self.assertTrue(np.allclose(A, A, atol=1E-3 ) ) # Test 2-D numpy arrays are batched. 
SCREAMING_SNAKE_CASE : List[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)] SCREAMING_SNAKE_CASE : Optional[Any] = np.asarray(A ) SCREAMING_SNAKE_CASE : int = feature_extractor(A, return_tensors='np' ).input_values SCREAMING_SNAKE_CASE : Tuple = feature_extractor(A, return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(A, A ): self.assertTrue(np.allclose(A, A, atol=1E-3 ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.feat_extract_tester.prepare_inputs_for_target() SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : str = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : Any = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(A ) == len(A ) for x, y in zip(A, processed_features[input_name] ) ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=A ) SCREAMING_SNAKE_CASE : Dict = BatchFeature({input_name: speech_inputs}, tensor_type='np' ) SCREAMING_SNAKE_CASE : Dict = processed_features[input_name] if len(batch_features_input.shape ) < 3: SCREAMING_SNAKE_CASE : str = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.feat_extract_tester.prepare_inputs_for_target(equal_length=A ) SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : List[Any] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : Tuple = BatchFeature({input_name: speech_inputs}, tensor_type='pt' ) SCREAMING_SNAKE_CASE : List[Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: SCREAMING_SNAKE_CASE : Union[str, Any] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : Tuple = self.feat_extract_tester.prepare_inputs_for_target() SCREAMING_SNAKE_CASE : Dict = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : Dict = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE : str = feat_extract.num_mel_bins # hack! SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract.pad(A, padding='longest', return_tensors='np' )[input_name] SCREAMING_SNAKE_CASE : Dict = feat_extract.pad(A, padding='longest', return_tensors='pt' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.feat_extract_dict SCREAMING_SNAKE_CASE : str = True SCREAMING_SNAKE_CASE : List[str] = self.feature_extraction_class(**A ) SCREAMING_SNAKE_CASE : Tuple = self.feat_extract_tester.prepare_inputs_for_target() SCREAMING_SNAKE_CASE : Optional[Any] = [len(A ) for x in speech_inputs] SCREAMING_SNAKE_CASE : Any = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : Dict = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE : int = feat_extract.num_mel_bins # hack! 
SCREAMING_SNAKE_CASE : List[Any] = feat_extract.pad(A, padding='longest', return_tensors='np' ) self.assertIn('attention_mask', A ) self.assertListEqual(list(processed.attention_mask.shape ), list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist(), A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.feat_extract_dict SCREAMING_SNAKE_CASE : Dict = True SCREAMING_SNAKE_CASE : str = self.feature_extraction_class(**A ) SCREAMING_SNAKE_CASE : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target() SCREAMING_SNAKE_CASE : Any = [len(A ) for x in speech_inputs] SCREAMING_SNAKE_CASE : Tuple = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : int = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE : Union[str, Any] = min(A ) SCREAMING_SNAKE_CASE : str = feat_extract.num_mel_bins # hack! SCREAMING_SNAKE_CASE : Tuple = feat_extract.pad( A, padding='max_length', max_length=A, truncation=A, return_tensors='np' ) self.assertIn('attention_mask', A ) self.assertListEqual( list(processed_pad.attention_mask.shape ), [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist(), [max_length for x in speech_inputs] ) def UpperCamelCase_ ( self, A ): '''simple docstring''' from datasets import load_dataset SCREAMING_SNAKE_CASE : str = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation' ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE : Dict = ds.sort('id' ).select(range(A ) )[:num_samples]['audio'] return [x["array"] for x in speech_samples] def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = torch.tensor( [2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03, 3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03, 2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04, 4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03, 7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04, 4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] ) # fmt: on SCREAMING_SNAKE_CASE : str = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE : Dict = SpeechTaFeatureExtractor() SCREAMING_SNAKE_CASE : Tuple = feature_extractor(A, return_tensors='pt' ).input_values self.assertEquals(input_values.shape, (1, 93_680) ) self.assertTrue(torch.allclose(input_values[0, :30], A, atol=1E-6 ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = torch.tensor( [-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77, -3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86, -3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71, -3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] ) # fmt: on SCREAMING_SNAKE_CASE : Optional[int] = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaFeatureExtractor() SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor(audio_target=A, return_tensors='pt' ).input_values self.assertEquals(input_values.shape, (1, 366, 80) ) self.assertTrue(torch.allclose(input_values[0, 0, :30], A, atol=1E-4 ) )
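# A hedged usage sketch of the two extraction modes the tests above exercise.
# Note: this dump mangles the class name to `SpeechTaFeatureExtractor`; the
# actual transformers class is `SpeechT5FeatureExtractor`. The waveform is fake.
import numpy as np
from transformers import SpeechT5FeatureExtractor

extractor = SpeechT5FeatureExtractor()
waveform = np.random.randn(16_000).astype(np.float32)  # 1 s of noise at 16 kHz

# Waveform mode (the tests assert zero-mean/unit-variance when normalization is on).
inputs = extractor(waveform, sampling_rate=16_000, return_tensors="np")
print(inputs.input_values.shape)  # (1, 16000)

# Target mode: log-mel features with `num_mel_bins` channels, as asserted above.
targets = extractor(audio_target=waveform, sampling_rate=16_000, return_tensors="np")
print(targets.input_values.shape)  # (1, n_frames, extractor.num_mel_bins)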
'''simple docstring''' import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class _a : '''simple docstring''' def __init__( self, A = "cpu", A = "openai/clip-vit-large-patch14" ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = device SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizerFast.from_pretrained(A ) SCREAMING_SNAKE_CASE : int = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] SCREAMING_SNAKE_CASE : str = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] SCREAMING_SNAKE_CASE : Dict = torchvision.transforms.Normalize(self.image_mean, self.image_std ) SCREAMING_SNAKE_CASE : List[str] = torchvision.transforms.Resize(224 ) SCREAMING_SNAKE_CASE : List[Any] = torchvision.transforms.CenterCrop(224 ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.resize(A ) SCREAMING_SNAKE_CASE : Any = self.center_crop(A ) SCREAMING_SNAKE_CASE : str = self.normalize(A ) return images def __call__( self, A=None, A=None, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.tokenizer(text=A, **A ) SCREAMING_SNAKE_CASE : Tuple = self.preprocess_img(A ) SCREAMING_SNAKE_CASE : List[str] = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class _a ( nn.Module ): '''simple docstring''' def __init__( self, A=10, A=0.01, A=None, A=None, A=None, A=None, A=None, A=None, A=False, A=True, A="image", A=True, A=False, A=False, A=False, ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : List[Any] = device if device else get_device() if vqgan: SCREAMING_SNAKE_CASE : Optional[Any] = vqgan else: SCREAMING_SNAKE_CASE : Tuple = load_vqgan(self.device, conf_path=A, ckpt_path=A ) self.vqgan.eval() if clip: SCREAMING_SNAKE_CASE : List[str] = clip else: SCREAMING_SNAKE_CASE : Any = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' ) self.clip.to(self.device ) SCREAMING_SNAKE_CASE : Optional[int] = ProcessorGradientFlow(device=self.device ) SCREAMING_SNAKE_CASE : Optional[int] = iterations SCREAMING_SNAKE_CASE : Tuple = lr SCREAMING_SNAKE_CASE : Tuple = log SCREAMING_SNAKE_CASE : str = make_grid SCREAMING_SNAKE_CASE : Dict = return_val SCREAMING_SNAKE_CASE : Union[str, Any] = quantize SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decoder.z_shape def UpperCamelCase_ ( self, A=None, A=None, A=5, A=True ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [] if output_path is None: SCREAMING_SNAKE_CASE : int = './animation.gif' if input_path is None: SCREAMING_SNAKE_CASE : Optional[int] = self.save_path SCREAMING_SNAKE_CASE : Optional[Any] = sorted(glob(input_path + '/*' ) ) if not len(A ): raise ValueError( 'No images found in save path, aborting (did you pass save_intermediate=True to the generate' ' function?)' ) if len(A ) == 1: print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' ) SCREAMING_SNAKE_CASE : Optional[Any] = total_duration / len(A ) SCREAMING_SNAKE_CASE : int = [frame_duration] * len(A ) if extend_frames: SCREAMING_SNAKE_CASE : List[str] = 1.5 SCREAMING_SNAKE_CASE : int = 3 for file_name in paths: if file_name.endswith('.png' ): images.append(imageio.imread(A ) ) imageio.mimsave(A, A, duration=A ) print(F"gif 
saved to {output_path}" ) def UpperCamelCase_ ( self, A=None, A=None ): '''simple docstring''' if not (path or img): raise ValueError('Input either path or tensor' ) if img is not None: raise NotImplementedError SCREAMING_SNAKE_CASE : str = preprocess(Image.open(A ), target_image_size=256 ).to(self.device ) SCREAMING_SNAKE_CASE : Any = preprocess_vqgan(A ) SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : Tuple = self.vqgan.encode(A ) return z def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.latent.detach().requires_grad_() SCREAMING_SNAKE_CASE : Union[str, Any] = base_latent + transform_vector if self.quantize: SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.quantize(A ) else: SCREAMING_SNAKE_CASE : Optional[Any] = trans_latent return self.vqgan.decode(A ) def UpperCamelCase_ ( self, A, A, A=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.clip_preprocessor(text=A, images=A, return_tensors='pt', padding=A ) SCREAMING_SNAKE_CASE : str = self.clip(**A ) SCREAMING_SNAKE_CASE : Any = clip_outputs.logits_per_image if weights is not None: SCREAMING_SNAKE_CASE : List[Any] = similarity_logits * weights return similarity_logits.sum() def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_clip_similarity(pos_prompts['prompts'], A, weights=(1 / pos_prompts['weights']) ) if neg_prompts: SCREAMING_SNAKE_CASE : List[Any] = self._get_clip_similarity(neg_prompts['prompts'], A, weights=neg_prompts['weights'] ) else: SCREAMING_SNAKE_CASE : str = torch.tensor([1], device=self.device ) SCREAMING_SNAKE_CASE : List[Any] = -torch.log(A ) + torch.log(A ) return loss def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = torch.randn_like(self.latent, requires_grad=A, device=self.device ) SCREAMING_SNAKE_CASE : Optional[int] = torch.optim.Adam([vector], lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() SCREAMING_SNAKE_CASE : Union[str, Any] = self._add_vector(A ) SCREAMING_SNAKE_CASE : Dict = loop_post_process(A ) SCREAMING_SNAKE_CASE : List[str] = self._get_CLIP_loss(A, A, A ) print('CLIP loss', A ) if self.log: wandb.log({'CLIP Loss': clip_loss} ) clip_loss.backward(retain_graph=A ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' wandb.init(reinit=A, project='face-editor' ) wandb.config.update({'Positive Prompts': positive_prompts} ) wandb.config.update({'Negative Prompts': negative_prompts} ) wandb.config.update({'lr': self.lr, 'iterations': self.iterations} ) if image_path: SCREAMING_SNAKE_CASE : Tuple = Image.open(A ) SCREAMING_SNAKE_CASE : int = image.resize((256, 256) ) wandb.log('Original Image', wandb.Image(A ) ) def UpperCamelCase_ ( self, A ): '''simple docstring''' if not prompts: return [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Dict = [] if isinstance(A, A ): SCREAMING_SNAKE_CASE : Union[str, Any] = [prompt.strip() for prompt in prompts.split('|' )] for prompt in prompts: if isinstance(A, (tuple, list) ): SCREAMING_SNAKE_CASE : List[str] = prompt[0] SCREAMING_SNAKE_CASE : Any = float(prompt[1] ) elif ":" in prompt: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = prompt.split(':' ) SCREAMING_SNAKE_CASE : Any = float(A ) else: SCREAMING_SNAKE_CASE : Dict = prompt SCREAMING_SNAKE_CASE : List[Any] = 1.0 processed_prompts.append(A ) weights.append(A ) return { 
"prompts": processed_prompts, "weights": torch.tensor(A, device=self.device ), } def UpperCamelCase_ ( self, A, A=None, A=None, A=True, A=False, A=True, A=True, A=None, ): '''simple docstring''' if image_path: SCREAMING_SNAKE_CASE : int = self._get_latent(A ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(self.latent_dim, device=self.device ) if self.log: self._init_logging(A, A, A ) assert pos_prompts, "You must provide at least one positive prompt." SCREAMING_SNAKE_CASE : Dict = self.process_prompts(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.process_prompts(A ) if save_final and save_path is None: SCREAMING_SNAKE_CASE : Optional[int] = os.path.join('./outputs/', '_'.join(pos_prompts['prompts'] ) ) if not os.path.exists(A ): os.makedirs(A ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = save_path + '_' + get_timestamp() os.makedirs(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = save_path SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decode(self.latent )[0] if show_intermediate: print('Original Image' ) show_pil(custom_to_pil(A ) ) SCREAMING_SNAKE_CASE : int = loop_post_process(A ) for iter, transformed_img in enumerate(self._optimize_CLIP(A, A, A ) ): if show_intermediate: show_pil(A ) if save_intermediate: transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}.png" ) ) if self.log: wandb.log({'Image': wandb.Image(A )} ) if show_final: show_pil(A ) if save_final: transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}_final.png" ) )
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class _a : '''simple docstring''' def __init__( self, A, A=13, A=7, A=True, A=True, A=True, A=99, A=32, A=5, A=4, A=37, A="gelu", A=0.1, A=0.1, A=512, A=16, A=2, A=0.02, A=3, A=4, A=None, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = parent SCREAMING_SNAKE_CASE : str = batch_size SCREAMING_SNAKE_CASE : Dict = seq_length SCREAMING_SNAKE_CASE : Tuple = is_training SCREAMING_SNAKE_CASE : Dict = use_token_type_ids SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels SCREAMING_SNAKE_CASE : Any = vocab_size SCREAMING_SNAKE_CASE : List[str] = hidden_size SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads SCREAMING_SNAKE_CASE : Tuple = intermediate_size SCREAMING_SNAKE_CASE : Any = hidden_act SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings SCREAMING_SNAKE_CASE : Dict = type_vocab_size SCREAMING_SNAKE_CASE : Tuple = type_sequence_label_size SCREAMING_SNAKE_CASE : Any = initializer_range SCREAMING_SNAKE_CASE : List[Any] = num_labels SCREAMING_SNAKE_CASE : Optional[Any] = num_choices SCREAMING_SNAKE_CASE : Dict = scope SCREAMING_SNAKE_CASE : Optional[int] = self.vocab_size - 1 def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) SCREAMING_SNAKE_CASE : List[Any] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) SCREAMING_SNAKE_CASE : Optional[Any] = None SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Tuple = None if self.use_labels: SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size], self.type_sequence_label_size ) SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size], self.num_choices ) SCREAMING_SNAKE_CASE : Optional[int] = OpenAIGPTConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, ) SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def UpperCamelCase_ ( self, A, A, A, A, *A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = OpenAIGPTModel(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : str = model(A, token_type_ids=A, head_mask=A ) SCREAMING_SNAKE_CASE : Dict = model(A, token_type_ids=A ) SCREAMING_SNAKE_CASE : str = model(A ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( 
self, A, A, A, A, *A ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = OpenAIGPTLMHeadModel(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Union[str, Any] = model(A, token_type_ids=A, labels=A ) self.parent.assertEqual(result.loss.shape, () ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase_ ( self, A, A, A, A, *A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = OpenAIGPTDoubleHeadsModel(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : int = model(A, token_type_ids=A, labels=A ) self.parent.assertEqual(result.loss.shape, () ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase_ ( self, A, A, A, A, *A ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.num_labels SCREAMING_SNAKE_CASE : int = OpenAIGPTForSequenceClassification(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size], self.type_sequence_label_size ) SCREAMING_SNAKE_CASE : Tuple = model(A, token_type_ids=A, labels=A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) : Optional[Any] = config_and_inputs SCREAMING_SNAKE_CASE : Dict = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Tuple = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) A : Dict = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly A : Tuple = ( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def UpperCamelCase_ ( self, A, A, A, A, A ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def UpperCamelCase_ ( self, A, A, A=False ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = super()._prepare_for_class(A, A, return_labels=A ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": SCREAMING_SNAKE_CASE : List[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=A, ) SCREAMING_SNAKE_CASE : List[Any] = inputs_dict['labels'] SCREAMING_SNAKE_CASE : List[str] = inputs_dict['labels'] SCREAMING_SNAKE_CASE : int = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=A, ) SCREAMING_SNAKE_CASE : Optional[int] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=A ) return inputs_dict def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = OpenAIGPTModelTester(self ) SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self, config_class=A, n_embd=37 ) def UpperCamelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*A ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : List[str] = OpenAIGPTModel.from_pretrained(A ) self.assertIsNotNone(A ) @require_torch class _a ( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(A ) SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[481, 4_735, 544]], dtype=torch.long, device=A ) # the president is SCREAMING_SNAKE_CASE : Dict = [ 481, 4_735, 544, 246, 963, 870, 762, 239, 244, 40_477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the SCREAMING_SNAKE_CASE : Dict = model.generate(A, do_sample=A ) self.assertListEqual(output_ids[0].tolist(), A )
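# A hedged end-to-end sketch of what the slow integration test above checks:
# greedy generation from the original `openai-gpt` checkpoint, continuing
# "the president is" (downloads weights on first run).
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
input_ids = tokenizer("the president is", return_tensors="pt").input_ids
output_ids = model.generate(input_ids, do_sample=False)
# Per the expected ids above, this decodes to:
# 'the president is a very good man. " \n " i\'m sure he is, " said the'
print(tokenizer.decode(output_ids[0]))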
'''simple docstring'''

import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    '''simple docstring'''

    def __init__(self, controlnets):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample,
        timestep,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale,
        class_labels=None,
        timestep_cond=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        guess_mode=False,
        return_dict=True,
    ):
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory,
        is_main_process=True,
        save_function=None,
        safe_serialization=False,
        variant=None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + F"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + F"_{idx}"

        logger.info(F"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                F"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
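# The forward pass above merges per-controlnet residuals by summation after the
# first net. A minimal standalone illustration of that merge rule with dummy
# tensors (shapes invented):
import torch

down_a, mid_a = [torch.ones(1, 4, 8, 8)], torch.ones(1, 4, 4, 4)
down_b, mid_b = [torch.full((1, 4, 8, 8), 2.0)], torch.full((1, 4, 4, 4), 2.0)

down = [prev + curr for prev, curr in zip(down_a, down_b)]
mid = mid_a + mid_b
print(down[0].mean().item(), mid.mean().item())  # 3.0 3.0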
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): UpperCamelCase_ = "pt" elif is_tf_available(): UpperCamelCase_ = "tf" else: UpperCamelCase_ = "jax" class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Optional[int] = ByTaTokenizer A : Tuple = False def UpperCamelCase_ ( self ): '''simple docstring''' super().setUp() SCREAMING_SNAKE_CASE : List[Any] = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCamelCase_ ( self ): '''simple docstring''' return ByTaTokenizer.from_pretrained('google/byt5-small' ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, A, A=False, A=20, A=5 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = [] for i in range(len(A ) ): try: SCREAMING_SNAKE_CASE : int = tokenizer.decode([i], clean_up_tokenization_spaces=A ) except UnicodeDecodeError: pass toks.append((i, tok) ) SCREAMING_SNAKE_CASE : Tuple = list(filter(lambda A : re.match(r'^[ a-zA-Z]+$', t[1] ), A ) ) SCREAMING_SNAKE_CASE : Any = list(filter(lambda A : [t[0]] == tokenizer.encode(t[1], add_special_tokens=A ), A ) ) if max_length is not None and len(A ) > max_length: SCREAMING_SNAKE_CASE : List[Any] = toks[:max_length] if min_length is not None and len(A ) < min_length and len(A ) > 0: while len(A ) < min_length: SCREAMING_SNAKE_CASE : str = toks + toks # toks_str = [t[1] for t in toks] SCREAMING_SNAKE_CASE : str = [t[0] for t in toks] # Ensure consistency SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.decode(A, clean_up_tokenization_spaces=A ) if " " not in output_txt and len(A ) > 1: SCREAMING_SNAKE_CASE : Optional[int] = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=A ) + ' ' + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=A ) ) if with_prefix_space: SCREAMING_SNAKE_CASE : Dict = ' ' + output_txt SCREAMING_SNAKE_CASE : Any = tokenizer.encode(A, add_special_tokens=A ) return output_txt, output_ids def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.ta_base_tokenizer SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] ) SCREAMING_SNAKE_CASE : str = tokenizer(['hi', 'I went to the gym', ''] ) self.assertListEqual(batch_with_eos_added['input_ids'], batch_without_eos_added['input_ids'] ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.ta_base_tokenizer SCREAMING_SNAKE_CASE : Dict = 'Unicode €.' 
SCREAMING_SNAKE_CASE : Any = tokenizer(A ) SCREAMING_SNAKE_CASE : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded['input_ids'], A ) # decoding SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(A ) self.assertEqual(A, 'Unicode €.</s>' ) SCREAMING_SNAKE_CASE : int = tokenizer('e è é ê ë' ) SCREAMING_SNAKE_CASE : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded['input_ids'], A ) # decoding SCREAMING_SNAKE_CASE : str = tokenizer.decode(A ) self.assertEqual(A, 'e è é ê ë</s>' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), 'e è é ê ë</s>' ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.ta_base_tokenizer SCREAMING_SNAKE_CASE : Tuple = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off SCREAMING_SNAKE_CASE : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on SCREAMING_SNAKE_CASE : Any = tokenizer(A, padding=A, return_tensors=A ) self.assertIsInstance(A, A ) if FRAMEWORK != "jax": SCREAMING_SNAKE_CASE : List[str] = list(batch.input_ids.numpy()[0] ) else: SCREAMING_SNAKE_CASE : List[Any] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(A, A ) self.assertEqual((2, 37), batch.input_ids.shape ) self.assertEqual((2, 37), batch.attention_mask.shape ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.ta_base_tokenizer SCREAMING_SNAKE_CASE : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(A, padding=A, return_tensors=A ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids', A ) self.assertIn('attention_mask', A ) self.assertNotIn('decoder_input_ids', A ) self.assertNotIn('decoder_attention_mask', A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.ta_base_tokenizer SCREAMING_SNAKE_CASE : List[Any] = [ 'Summary of the text.', 'Another summary.', ] SCREAMING_SNAKE_CASE : str = tokenizer( text_target=A, max_length=32, padding='max_length', truncation=A, return_tensors=A ) self.assertEqual(32, targets['input_ids'].shape[1] ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.ta_base_tokenizer SCREAMING_SNAKE_CASE : List[str] = ['A long paragraph for summarization. </s>'] SCREAMING_SNAKE_CASE : Tuple = ['Summary of the text. 
</s>'] # fmt: off SCREAMING_SNAKE_CASE : Dict = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] SCREAMING_SNAKE_CASE : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on SCREAMING_SNAKE_CASE : int = tokenizer(A, text_target=A ) self.assertEqual(A, batch['input_ids'][0] ) self.assertEqual(A, batch['labels'][0] ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): self.assertNotEqual(tokenizer.model_max_length, 42 ) # Now let's start the test SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc SCREAMING_SNAKE_CASE : Union[str, Any] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : int = ' He is very happy, UNwant\u00E9d,running' SCREAMING_SNAKE_CASE : Any = tokenizer.encode(A, add_special_tokens=A ) tokenizer.save_pretrained(A ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.__class__.from_pretrained(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = after_tokenizer.encode(A, add_special_tokens=A ) self.assertListEqual(A, A ) shutil.rmtree(A ) SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc SCREAMING_SNAKE_CASE : str = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : int = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A ) tokenizer.save_pretrained(A ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.__class__.from_pretrained(A ) SCREAMING_SNAKE_CASE : List[Any] = after_tokenizer.encode(A, add_special_tokens=A ) self.assertListEqual(A, A ) self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length, 42 ) SCREAMING_SNAKE_CASE : Dict = tokenizer.__class__.from_pretrained(A, model_max_length=43 ) self.assertEqual(tokenizer.model_max_length, 43 ) shutil.rmtree(A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(A ) with open(os.path.join(A, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file: SCREAMING_SNAKE_CASE : List[Any] = json.load(A ) with open(os.path.join(A, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file: SCREAMING_SNAKE_CASE : Any = json.load(A ) SCREAMING_SNAKE_CASE : Optional[Any] = [F"<extra_id_{i}>" for i in range(125 )] SCREAMING_SNAKE_CASE : List[Any] = added_tokens_extra_ids + [ 'an_additional_special_token' ] 
SCREAMING_SNAKE_CASE : Union[str, Any] = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(A, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile: json.dump(A, A ) with open(os.path.join(A, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile: json.dump(A, A ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files SCREAMING_SNAKE_CASE : Dict = tokenizer_class.from_pretrained( A, ) self.assertIn( 'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained SCREAMING_SNAKE_CASE : Any = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=A )] SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_class.from_pretrained( A, additional_special_tokens=A, ) self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_class.from_pretrained(A ) self.assertTrue(tokenizer.decode([255] ) == '' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizers(fast=A, do_lower_case=A ) for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): SCREAMING_SNAKE_CASE : Optional[Any] = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>'] SCREAMING_SNAKE_CASE : List[str] = tokenizer.convert_tokens_to_string(A ) self.assertIsInstance(A, A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): SCREAMING_SNAKE_CASE : Union[str, Any] = [ 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', ] SCREAMING_SNAKE_CASE : Any = 0 SCREAMING_SNAKE_CASE : Tuple = tokenizer.convert_ids_to_tokens( A, skip_special_tokens=A ) for attr in attributes_list: setattr(A, attr + '_id', A ) self.assertEqual(getattr(A, A ), A ) self.assertEqual(getattr(A, attr + '_id' ), A ) setattr(A, attr + '_id', A ) self.assertEqual(getattr(A, A ), A ) self.assertEqual(getattr(A, attr + '_id' ), A ) setattr(A, 
'additional_special_tokens_ids', [] ) self.assertListEqual(getattr(A, 'additional_special_tokens' ), [] ) self.assertListEqual(getattr(A, 'additional_special_tokens_ids' ), [] ) setattr(A, 'additional_special_tokens_ids', [token_id_to_test_setters] ) self.assertListEqual(getattr(A, 'additional_special_tokens' ), [token_to_test_setters] ) self.assertListEqual(getattr(A, 'additional_special_tokens_ids' ), [token_id_to_test_setters] )
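# A hedged sketch of the byte-level scheme the expectations above encode:
# ByT5 ids are UTF-8 bytes offset by the 3 special tokens, so 'U' (85) -> 88.
# This dump mangles the class name to `ByTaTokenizer`; the real transformers
# class is `ByT5Tokenizer`.
from transformers import ByT5Tokenizer

tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small")
ids = tokenizer("Unicode €.").input_ids
print(ids[:3])                # [88, 113, 108] == b'Uni' bytes + 3
print(tokenizer.decode(ids))  # 'Unicode €.</s>'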
'''simple docstring''' from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging UpperCamelCase_ = logging.get_logger(__name__) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : str = ['''audio_values''', '''audio_mask'''] def __init__( self, A=2_048, A=1, A=[16, 16], A=128, A=44_100, A=86, A=2_048, A=0.0, **A, ): '''simple docstring''' super().__init__( feature_size=A, sampling_rate=A, padding_value=A, **A, ) SCREAMING_SNAKE_CASE : str = spectrogram_length SCREAMING_SNAKE_CASE : Optional[Any] = num_channels SCREAMING_SNAKE_CASE : List[str] = patch_size SCREAMING_SNAKE_CASE : Optional[int] = feature_size // self.patch_size[1] SCREAMING_SNAKE_CASE : Dict = n_fft SCREAMING_SNAKE_CASE : Tuple = sampling_rate // hop_length_to_sampling_rate SCREAMING_SNAKE_CASE : str = sampling_rate SCREAMING_SNAKE_CASE : int = padding_value SCREAMING_SNAKE_CASE : Any = mel_filter_bank( num_frequency_bins=1 + n_fft // 2, num_mel_filters=A, min_frequency=0.0, max_frequency=2_20_50.0, sampling_rate=A, norm='slaney', mel_scale='slaney', ).T def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = spectrogram( A, window_function(self.n_fft, 'hann' ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0, ) SCREAMING_SNAKE_CASE : Union[str, Any] = log_spec[:, :-1] SCREAMING_SNAKE_CASE : List[Any] = log_spec - 20.0 SCREAMING_SNAKE_CASE : Optional[Any] = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0 return log_spec def __call__( self, A, A = None, A = True, A = None, A = False, A = False, **A, ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( 'This feature extractor is set to support sampling rate' F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled" F" with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) SCREAMING_SNAKE_CASE : List[Any] = isinstance(A, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"Only mono-channel audio is supported for input to {self}" ) SCREAMING_SNAKE_CASE : int = is_batched_numpy or ( isinstance(A, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(A, np.ndarray ): SCREAMING_SNAKE_CASE : Any = np.asarray(A, dtype=np.floataa ) elif isinstance(A, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): SCREAMING_SNAKE_CASE : Optional[Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis SCREAMING_SNAKE_CASE : int = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0], A ): SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(A, dtype=np.floataa ) for feature in audio_features] # Create audio attention mask SCREAMING_SNAKE_CASE : Tuple = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: SCREAMING_SNAKE_CASE : List[Any] = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] SCREAMING_SNAKE_CASE : Tuple = np.array(A ).astype(np.floataa ) # convert into correct format for padding SCREAMING_SNAKE_CASE : Tuple = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch SCREAMING_SNAKE_CASE : Optional[Any] = np.ones([len(A ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) SCREAMING_SNAKE_CASE : Optional[int] = padded_audio_features * self.padding_value for i in range(len(A ) ): SCREAMING_SNAKE_CASE : Optional[int] = audio_features[i] SCREAMING_SNAKE_CASE : Union[str, Any] = feature # return as BatchFeature if return_attention_mask: SCREAMING_SNAKE_CASE : Any = {'audio_values': padded_audio_features, 'audio_mask': audio_mask} else: SCREAMING_SNAKE_CASE : Dict = {'audio_values': padded_audio_features} SCREAMING_SNAKE_CASE : str = BatchFeature(data=A, tensor_type=A ) return encoded_inputs
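# A hedged usage sketch for the extractor above. Judging by the
# 'audio_values'/'audio_mask' outputs this is TVLT's audio feature extractor
# (`TvltFeatureExtractor` in transformers, assumed here); the waveform is
# invented and the keyword names follow the signature as best I can read it.
import numpy as np
from transformers import TvltFeatureExtractor

extractor = TvltFeatureExtractor()
waveform = np.random.randn(44_100).astype(np.float32)  # 1 s at the default 44.1 kHz
out = extractor(waveform, sampling_rate=44_100, return_attention_mask=True, return_tensors="np")
print(out["audio_values"].shape)  # (1, 1, padded_time, 128) log-mel patches
print(out["audio_mask"].shape)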
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
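# For reference, a minimal stand-in for the `_LazyModule` machinery used above
# (a sketch of the idea only; the real class also handles TYPE_CHECKING, module
# specs, and error reporting): attribute access triggers the submodule import.
import importlib
import types


class LazySubmodules(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)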
'''simple docstring'''

from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    """simple docstring"""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node_a, node_b, cost in edges:
        adjacency[node_a].append([node_b, cost])
        adjacency[node_b].append([node_a, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
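# The assertions above accept each MST edge in either orientation. For
# reference, a minimal heap-based Prim's over the same [node, cost] adjacency
# format (a sketch, not the implementation under test):
import heapq


def prim_sketch(adjacency, start=0):
    visited = {start}
    result = []
    heap = [(cost, start, node) for node, cost in adjacency[start]]
    heapq.heapify(heap)
    while heap:
        cost, u, v = heapq.heappop(heap)
        if v in visited:
            continue
        visited.add(v)
        result.append((u, v))
        for w, c in adjacency[v]:
            if w not in visited:
                heapq.heappush(heap, (c, v, w))
    return result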
'''simple docstring'''

ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}


def next_term(a_i, k, i, n):
    """simple docstring"""
    # write a_i = b * 10^k + c; ds_b -> digitsum(b)
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    """simple docstring"""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """simple docstring"""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15):
    """simple docstring"""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(F"""{solution() = }""")
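# The sequence being accelerated above is a -> a + digit_sum(a), starting from
# 1 (Project Euler 551). A naive reference for small term counts, handy for
# sanity-checking solution():
def naive_terms(count):
    """First `count` terms, starting from 1."""
    a, terms = 1, [1]
    for _ in range(count - 1):
        a += sum(int(d) for d in str(a))
        terms.append(a)
    return terms


print(naive_terms(7))  # [1, 2, 4, 8, 16, 23, 28]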
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : int = StableDiffusionDiffEditPipeline A : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''} A : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''} A : str = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess A : Union[str, Any] = frozenset([] ) def UpperCamelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=A, ) SCREAMING_SNAKE_CASE : int = DDIMScheduler( beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_one=A, ) SCREAMING_SNAKE_CASE : str = DDIMInverseScheduler( beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_zero=A, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Tuple = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, ) SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(A ) SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) SCREAMING_SNAKE_CASE : int = { 'unet': unet, 'scheduler': scheduler, 'inverse_scheduler': inverse_scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 16, 16), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(A ) ).to(A ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = { 'prompt': 'a dog and a newt', 'mask_image': mask, 'image_latents': latents, 'generator': generator, 
'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : Any = image.cpu().permute(0, 2, 3, 1 )[0] SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(A ) ).convert('RGB' ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : int = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Dict = { 'image': image, 'source_prompt': 'a cat and a frog', 'target_prompt': 'a dog and a newt', 'generator': generator, 'num_inference_steps': 2, 'num_maps_per_mask': 2, 'mask_encode_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0, 2, 3, 1 )[0] SCREAMING_SNAKE_CASE : int = Image.fromarray(np.uinta(A ) ).convert('RGB' ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Any = { 'image': image, 'prompt': 'a cat and a frog', 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'decode_latents': True, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' if not hasattr(self.pipeline_class, '_optional_components' ): return SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components() SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(A, A, A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Dict = pipe(**A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(A ) SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class.from_pretrained(A ) pipe_loaded.to(A ) pipe_loaded.set_progress_bar_config(disable=A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(A, A ) is None, F"`{optional_component}` did not stay set to None after loading.", ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Tuple = pipe_loaded(**A )[0] SCREAMING_SNAKE_CASE : List[str] = np.abs(output - output_loaded ).max() self.assertLess(A, 1E-4 ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = 'cpu' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : str = self.get_dummy_mask_inputs(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.generate_mask(**A ) SCREAMING_SNAKE_CASE : Dict = mask[0, -3:, -3:] self.assertEqual(mask.shape, (1, 16, 16) ) SCREAMING_SNAKE_CASE : Any = np.array([0] * 9 ) SCREAMING_SNAKE_CASE : Any = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) self.assertEqual(mask[0, -3, -4], 0 ) def UpperCamelCase_ ( self ): 
'''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 'cpu' SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components() SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A ) SCREAMING_SNAKE_CASE : Optional[Any] = pipe.invert(**A ).images SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3) ) SCREAMING_SNAKE_CASE : Tuple = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], ) SCREAMING_SNAKE_CASE : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=5E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 'cpu' SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Dict = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'} SCREAMING_SNAKE_CASE : Union[str, Any] = DPMSolverMultistepScheduler(**A ) SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepInverseScheduler(**A ) SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A ) SCREAMING_SNAKE_CASE : List[str] = pipe.invert(**A ).images SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3) ) SCREAMING_SNAKE_CASE : Tuple = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], ) SCREAMING_SNAKE_CASE : Any = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) @require_torch_gpu @slow class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' ) SCREAMING_SNAKE_CASE : Optional[int] = raw_image.convert('RGB' ).resize((768, 768) ) SCREAMING_SNAKE_CASE : List[str] = raw_image def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE : int = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : List[Any] = 'a bowl of fruit' SCREAMING_SNAKE_CASE : List[str] = 'a bowl of pears' SCREAMING_SNAKE_CASE : Dict = pipe.generate_mask( image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, ) SCREAMING_SNAKE_CASE : Optional[int] = pipe.invert( prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A ).latents SCREAMING_SNAKE_CASE : List[str] = pipe( prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, output_type='numpy', ).images[0] SCREAMING_SNAKE_CASE : List[Any] = ( np.array( load_image( 
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1 def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : int = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : str = 'a bowl of fruit' SCREAMING_SNAKE_CASE : Tuple = 'a bowl of pears' SCREAMING_SNAKE_CASE : List[Any] = pipe.generate_mask( image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, ) SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.invert( prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A, num_inference_steps=25, ).latents SCREAMING_SNAKE_CASE : str = pipe( prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy', ).images[0] SCREAMING_SNAKE_CASE : Tuple = ( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1
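# For orientation: the slow tests above exercise the three-stage DiffEdit flow
# (mask generation -> DDIM inversion -> masked regeneration). The condensed
# sketch below mirrors those tests; the checkpoint id, image URL and prompts
# are taken from the tests themselves, but treat the exact arguments as
# illustrative rather than canonical.
import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

raw_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
).convert("RGB").resize((768, 768))

# Stage 1: mask the regions where source and target prompts disagree.
mask_image = pipe.generate_mask(
    image=raw_image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears"
)
# Stage 2: invert the image into latents along the DDIM trajectory.
inv_latents = pipe.invert(prompt="a bowl of fruit", image=raw_image, inpaint_strength=0.7).latents
# Stage 3: regenerate only the masked region toward the target prompt.
image = pipe(
    prompt="a bowl of pears", mask_image=mask_image, image_latents=inv_latents, inpaint_strength=0.7
).images[0]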
'''simple docstring''' from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def lowercase__( ): """simple docstring""" import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join SCREAMING_SNAKE_CASE : Tuple = '__test_patch_submodule_mock__' with patch_submodule(_test_patching ,'os.path.join' ,__UpperCamelCase ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os ,_PatchedModuleObj ) assert isinstance(_test_patching.os.path ,_PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path ,_PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os ,_PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path ,_PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path ,_PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def lowercase__( ): """simple docstring""" assert _test_patching.open is open SCREAMING_SNAKE_CASE : Tuple = '__test_patch_submodule_builtin_mock__' # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching ,'open' ,__UpperCamelCase ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = '__test_patch_submodule_missing_mock__' with patch_submodule(_test_patching ,'pandas.read_csv' ,__UpperCamelCase ): pass def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = '__test_patch_submodule_missing_builtin_mock__' # _test_patching doesn't have "len" in its globals assert getattr(_test_patching ,'len' ,__UpperCamelCase ) is None with patch_submodule(_test_patching ,'len' ,__UpperCamelCase ): assert _test_patching.len is mock assert _test_patching.len is len def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : 
Optional[Any] = '__test_patch_submodule_start_and_stop_mock__' SCREAMING_SNAKE_CASE : Optional[Any] = patch_submodule(_test_patching ,'open' ,__UpperCamelCase ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def lowercase__( ): """simple docstring""" from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join SCREAMING_SNAKE_CASE : int = '__test_patch_submodule_successive_join__' SCREAMING_SNAKE_CASE : int = '__test_patch_submodule_successive_dirname__' SCREAMING_SNAKE_CASE : List[str] = '__test_patch_submodule_successive_rename__' assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching ,'os.path.join' ,__UpperCamelCase ): with patch_submodule(_test_patching ,'os.rename' ,__UpperCamelCase ): with patch_submodule(_test_patching ,'os.path.dirname' ,__UpperCamelCase ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching ,'os.rename' ,__UpperCamelCase ): with patch_submodule(_test_patching ,'os.path.join' ,__UpperCamelCase ): with patch_submodule(_test_patching ,'os.path.dirname' ,__UpperCamelCase ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = '__test_patch_submodule_doesnt_exist_mock__' with patch_submodule(_test_patching ,'__module_that_doesn_exist__.__attribute_that_doesn_exist__' ,__UpperCamelCase ): pass with patch_submodule(_test_patching ,'os.__attribute_that_doesn_exist__' ,__UpperCamelCase ): pass
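# A minimal sketch of the context-manager form these tests rely on: while the
# `with` block is active, every route to the target attribute inside the
# patched module is swapped for the mock, and everything is restored on exit.
# `some_module` is a placeholder for any module that does `import os` at the
# top level.
import os

from datasets.utils.patching import patch_submodule

import some_module  # placeholder module, not part of the tests above

mock = "__mock_join__"
with patch_submodule(some_module, "os.path.join", mock):
    assert some_module.os.path.join is mock  # patched inside the block
assert some_module.os.path.join is os.path.join  # restored afterwards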
'''simple docstring'''


def solution(limit: int = 1_000_000) -> int:
    """Return sum(phi(d) for d in 2..limit), i.e. the number of reduced
    proper fractions with denominator <= ``limit``."""
    # phi[i] starts at i - 1. For each prime p, subtracting phi[j] // p from
    # every multiple j of p leaves phi[i] equal to Euler's totient of i.
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime: no earlier prime has reduced it
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
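# A quick cross-check of the sieve against the definition of Euler's totient
# on a small limit (illustrative, not part of the original solution):
from math import gcd


def phi_naive(n: int) -> int:
    # Totient by definition: count 1 <= k <= n with gcd(n, k) == 1.
    return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)


assert solution(100) == sum(phi_naive(d) for d in range(2, 101))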
'''simple docstring''' import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Any = (IPNDMScheduler,) A : List[Any] = (('''num_inference_steps''', 50),) def UpperCamelCase_ ( self, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = {'num_train_timesteps': 1_000} config.update(**A ) return config def UpperCamelCase_ ( self, A=0, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = dict(self.forward_default_kwargs ) SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop('num_inference_steps', A ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_sample SCREAMING_SNAKE_CASE : Union[str, Any] = 0.1 * sample SCREAMING_SNAKE_CASE : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config(**A ) SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**A ) scheduler.set_timesteps(A ) # copy over dummy past residuals SCREAMING_SNAKE_CASE : Optional[int] = dummy_past_residuals[:] if time_step is None: SCREAMING_SNAKE_CASE : List[Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(A ) SCREAMING_SNAKE_CASE : Any = scheduler_class.from_pretrained(A ) new_scheduler.set_timesteps(A ) # copy over dummy past residuals SCREAMING_SNAKE_CASE : Any = dummy_past_residuals[:] SCREAMING_SNAKE_CASE : Optional[int] = scheduler.step(A, A, A, **A ).prev_sample SCREAMING_SNAKE_CASE : Tuple = new_scheduler.step(A, A, A, **A ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" SCREAMING_SNAKE_CASE : Dict = scheduler.step(A, A, A, **A ).prev_sample SCREAMING_SNAKE_CASE : str = new_scheduler.step(A, A, A, **A ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self, A=0, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = dict(self.forward_default_kwargs ) SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop('num_inference_steps', A ) SCREAMING_SNAKE_CASE : Tuple = self.dummy_sample SCREAMING_SNAKE_CASE : Union[str, Any] = 0.1 * sample SCREAMING_SNAKE_CASE : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**A ) scheduler.set_timesteps(A ) # copy over dummy past residuals (must be after setting timesteps) SCREAMING_SNAKE_CASE : List[str] = dummy_past_residuals[:] if time_step is None: SCREAMING_SNAKE_CASE : Any = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler_class.from_pretrained(A ) # copy over dummy past residuals new_scheduler.set_timesteps(A ) # copy over dummy past residual (must be after setting timesteps) SCREAMING_SNAKE_CASE : List[str] = dummy_past_residuals[:] SCREAMING_SNAKE_CASE : str = scheduler.step(A, A, A, **A ).prev_sample SCREAMING_SNAKE_CASE : int = new_scheduler.step(A, A, A, **A ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" SCREAMING_SNAKE_CASE : str = scheduler.step(A, A, 
A, **A ).prev_sample SCREAMING_SNAKE_CASE : Optional[Any] = new_scheduler.step(A, A, A, **A ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config(**A ) SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**A ) SCREAMING_SNAKE_CASE : int = 10 SCREAMING_SNAKE_CASE : List[str] = self.dummy_model() SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_sample_deter scheduler.set_timesteps(A ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE : List[Any] = model(A, A ) SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(A, A, A ).prev_sample for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE : List[str] = model(A, A ) SCREAMING_SNAKE_CASE : int = scheduler.step(A, A, A ).prev_sample return sample def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = dict(self.forward_default_kwargs ) SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop('num_inference_steps', A ) for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**A ) SCREAMING_SNAKE_CASE : Any = self.dummy_sample SCREAMING_SNAKE_CASE : Any = 0.1 * sample if num_inference_steps is not None and hasattr(A, 'set_timesteps' ): scheduler.set_timesteps(A ) elif num_inference_steps is not None and not hasattr(A, 'set_timesteps' ): SCREAMING_SNAKE_CASE : Any = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) SCREAMING_SNAKE_CASE : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] SCREAMING_SNAKE_CASE : List[str] = dummy_past_residuals[:] SCREAMING_SNAKE_CASE : Any = scheduler.timesteps[5] SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.timesteps[6] SCREAMING_SNAKE_CASE : int = scheduler.step(A, A, A, **A ).prev_sample SCREAMING_SNAKE_CASE : Optional[int] = scheduler.step(A, A, A, **A ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) SCREAMING_SNAKE_CASE : str = scheduler.step(A, A, A, **A ).prev_sample SCREAMING_SNAKE_CASE : int = scheduler.step(A, A, A, **A ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def UpperCamelCase_ ( self ): '''simple docstring''' for timesteps in [100, 1_000]: self.check_over_configs(num_train_timesteps=A, time_step=A ) def UpperCamelCase_ ( self ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ): self.check_over_forward(num_inference_steps=A, time_step=A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.full_loop() SCREAMING_SNAKE_CASE : str = torch.mean(torch.abs(A ) ) assert abs(result_mean.item() - 2_540_529 ) < 10
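# The `full_loop` helper above reduces to the standard diffusers scheduler
# contract: `set_timesteps`, then repeatedly `step`. A self-contained sketch
# with a dummy "model" (the zero residual stands in for a real noise
# prediction; shapes and step counts are illustrative):
import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1_000)
scheduler.set_timesteps(num_inference_steps=10)

sample = torch.randn(1, 3, 8, 8)  # stand-in for a noisy sample
for t in scheduler.timesteps:
    residual = torch.zeros_like(sample)  # a real UNet would predict this
    sample = scheduler.step(residual, t, sample).prev_sample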
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : str = LongformerTokenizer A : List[str] = True A : Optional[int] = LongformerTokenizerFast A : Tuple = True def UpperCamelCase_ ( self ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE : Any = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(A, range(len(A ) ) ) ) SCREAMING_SNAKE_CASE : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] SCREAMING_SNAKE_CASE : Tuple = {'unk_token': '<unk>'} SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] ) SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file, 'w', encoding='utf-8' ) as fp: fp.write(json.dumps(A ) + '\n' ) with open(self.merges_file, 'w', encoding='utf-8' ) as fp: fp.write('\n'.join(A ) ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = 'lower newer' SCREAMING_SNAKE_CASE : Union[str, Any] = 'lower newer' return input_text, output_text def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map ) SCREAMING_SNAKE_CASE : Optional[Any] = 'lower newer' SCREAMING_SNAKE_CASE : List[str] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(A ) # , add_prefix_space=True) self.assertListEqual(A, A ) SCREAMING_SNAKE_CASE : List[Any] = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ), A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=A ), [0, 31_414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode('Hello world! 
cécé herlolip 418', add_special_tokens=A ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('sequence builders', add_special_tokens=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('multi-sequence build', add_special_tokens=A ) SCREAMING_SNAKE_CASE : int = tokenizer.encode( 'sequence builders', add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode( 'sequence builders', 'multi-sequence build', add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A, A ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Optional[int] = 'Encode this sequence.' SCREAMING_SNAKE_CASE : List[str] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(A, A ) SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(A, A ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(A, A ) # Testing spaces after special tokens SCREAMING_SNAKE_CASE : Optional[int] = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(A, lstrip=A, rstrip=A )} ) # mask token has a left space SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A ) SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask> sequence' SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask>sequence' SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(A ) SCREAMING_SNAKE_CASE : Tuple = encoded.index(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(A, A ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = encoded.index(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(A, A ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(A, **A ) SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(A, **A ) SCREAMING_SNAKE_CASE : Optional[Any] = 'A, <mask> AllenNLP sentence.' 
SCREAMING_SNAKE_CASE : Any = tokenizer_r.encode_plus(A, add_special_tokens=A, return_token_type_ids=A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(A, add_special_tokens=A, return_token_type_ids=A ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), ) SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def UpperCamelCase_ ( self ): '''simple docstring''' for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ): SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'], A ) self.assertEqual(post_processor_state['add_prefix_space'], A ) self.assertEqual(post_processor_state['trim_offsets'], A ) def UpperCamelCase_ ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): SCREAMING_SNAKE_CASE : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` SCREAMING_SNAKE_CASE : Tuple = F"{text_of_1_token} {text_of_1_token}" SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) 
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Any = F" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[str] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : str = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
'''simple docstring''' import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class _a ( unittest.TestCase ): '''simple docstring''' def __init__( self, A, A=7, A=3, A=18, A=30, A=400, A=True, A=None, A=True, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = size if size is not None else {'height': 18, 'width': 18} SCREAMING_SNAKE_CASE : List[str] = parent SCREAMING_SNAKE_CASE : str = batch_size SCREAMING_SNAKE_CASE : Dict = num_channels SCREAMING_SNAKE_CASE : Optional[int] = image_size SCREAMING_SNAKE_CASE : str = min_resolution SCREAMING_SNAKE_CASE : List[Any] = max_resolution SCREAMING_SNAKE_CASE : int = do_resize SCREAMING_SNAKE_CASE : str = size SCREAMING_SNAKE_CASE : Optional[int] = do_normalize def UpperCamelCase_ ( self ): '''simple docstring''' return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04], [-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Tuple = ImageGPTImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = ImageGPTImageProcessingTester(self ) @property def UpperCamelCase_ ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A, 'clusters' ) ) self.assertTrue(hasattr(A, 'do_resize' ) ) self.assertTrue(hasattr(A, 'size' ) ) self.assertTrue(hasattr(A, 'do_normalize' ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'height': 18, 'width': 18} ) SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class.from_dict(self.image_processor_dict, size=42 ) self.assertEqual(image_processor.size, {'height': 42, 'width': 42} ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict ) SCREAMING_SNAKE_CASE : Optional[int] = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(A, obj[key] ) ) else: self.assertEqual(obj[key], A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(A, 'image_processor.json' ) image_processor_first.to_json_file(A ) SCREAMING_SNAKE_CASE : str = self.image_processing_class.from_json_file(A ).to_dict() SCREAMING_SNAKE_CASE : List[str] = image_processor_first.to_dict() for key, value in 
image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(A, image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key], A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class.from_pretrained(A ).to_dict() SCREAMING_SNAKE_CASE : List[str] = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(A, image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key], A ) @unittest.skip('ImageGPT requires clusters at initialization' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = load_dataset('hf-internal-testing/fixtures_image_utils' ,split='test' ) SCREAMING_SNAKE_CASE : Dict = Image.open(dataset[4]['file'] ) SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open(dataset[5]['file'] ) SCREAMING_SNAKE_CASE : str = [imagea, imagea] return images @require_vision @require_torch class _a ( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' ) SCREAMING_SNAKE_CASE : List[str] = prepare_images() # test non-batched SCREAMING_SNAKE_CASE : Dict = image_processing(images[0], return_tensors='pt' ) self.assertIsInstance(encoding.input_ids, torch.LongTensor ) self.assertEqual(encoding.input_ids.shape, (1, 1_024) ) SCREAMING_SNAKE_CASE : List[str] = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist(), A ) # test batched SCREAMING_SNAKE_CASE : List[Any] = image_processing(A, return_tensors='pt' ) self.assertIsInstance(encoding.input_ids, torch.LongTensor ) self.assertEqual(encoding.input_ids.shape, (2, 1_024) ) SCREAMING_SNAKE_CASE : int = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist(), A )
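# In short: the processor resizes the image and color-quantizes every pixel
# against its `clusters`, so the "tokens" are cluster indices. A sketch of the
# call the integration test performs (the random input image is illustrative):
import numpy as np
from PIL import Image
from transformers import ImageGPTImageProcessor

image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

image = Image.fromarray(np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8))
encoding = image_processing(image, return_tensors="pt")
print(encoding.input_ids.shape)  # one id per pixel of the resized image: (1, 1024)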
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Union[str, Any] = StableDiffusionXLImgaImgPipeline A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} A : str = PipelineTesterMixin.required_optional_params - {'''latents'''} A : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS A : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS A : int = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCamelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), attention_head_dim=(2, 4), use_linear_projection=A, addition_embed_type='text_time', addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, ) SCREAMING_SNAKE_CASE : str = EulerDiscreteScheduler( beta_start=0.0_00_85, beta_end=0.0_12, steps_offset=1, beta_schedule='scaled_linear', timestep_spacing='leading', ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[str] = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=32, ) SCREAMING_SNAKE_CASE : int = CLIPTextModel(A ) SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A ) SCREAMING_SNAKE_CASE : Optional[int] = CLIPTextModelWithProjection(A ) SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A ) SCREAMING_SNAKE_CASE : List[str] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'text_encoder_2': text_encoder_a, 'tokenizer_2': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : str = image / 2 + 0.5 if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : List[Any] = { 'prompt': 'A painting of a squirrel eating a burger', 
'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 5.0, 'output_type': 'numpy', 'strength': 0.75, } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE : str = self.get_dummy_components() SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionXLImgaImgPipeline(**A ) SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Any = sd_pipe(**A ).images SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE : List[Any] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase_ ( self ): '''simple docstring''' super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : List[str] = StableDiffusionXLImgaImgPipeline(**A ) SCREAMING_SNAKE_CASE : str = sd_pipe.to(A ) SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) # forward without prompt embeds SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Optional[Any] = 3 * ['this is a negative prompt'] SCREAMING_SNAKE_CASE : Optional[int] = negative_prompt SCREAMING_SNAKE_CASE : Optional[int] = 3 * [inputs['prompt']] SCREAMING_SNAKE_CASE : int = sd_pipe(**A ) SCREAMING_SNAKE_CASE : List[Any] = output.images[0, -3:, -3:, -1] # forward with prompt embeds SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : str = 3 * ['this is a negative prompt'] SCREAMING_SNAKE_CASE : int = 3 * [inputs.pop('prompt' )] ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) : Optional[Any] = sd_pipe.encode_prompt(A, negative_prompt=A ) SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe( **A, prompt_embeds=A, negative_prompt_embeds=A, pooled_prompt_embeds=A, negative_pooled_prompt_embeds=A, ) SCREAMING_SNAKE_CASE : Optional[int] = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @slow @require_torch_gpu class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self, A, A="cpu", A=torch.floataa, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Optional[Any] = np.random.RandomState(A ).standard_normal((1, 4, 64, 64) ) SCREAMING_SNAKE_CASE : str = torch.from_numpy(A ).to(device=A, dtype=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = { 'prompt': 'a photograph of an astronaut riding a horse', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 
DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Optional[Any] = self.get_inputs(A ) SCREAMING_SNAKE_CASE : str = pipe(**A ).images SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE : Dict = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] ) assert np.abs(image_slice - expected_slice ).max() < 7E-3
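# The prompt-embeddings test above depends on `encode_prompt` returning both
# per-token and pooled embeddings for the prompt and the negative prompt. A
# rough sketch of that round trip; the checkpoint id and image URL are
# assumptions for illustration, and the keyword names follow the test:
import torch
from diffusers import StableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
).to("cuda")

init_image = load_image("https://example.com/input.png").resize((768, 768))  # placeholder URL
(
    prompt_embeds,
    negative_prompt_embeds,
    pooled_prompt_embeds,
    negative_pooled_prompt_embeds,
) = pipe.encode_prompt("A painting of a squirrel eating a burger", negative_prompt="blurry")

image = pipe(
    image=init_image,
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_prompt_embeds,
    pooled_prompt_embeds=pooled_prompt_embeds,
    negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
    strength=0.75,
).images[0]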
'''simple docstring'''
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    '''simple docstring'''

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # model_outputs[0] is the first returned tensor (the last hidden state).
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
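# The usual entry point is the `pipeline` factory; a minimal usage sketch
# (the model id is illustrative):
from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("Transformers is great!")
# Nested Python lists shaped (batch, sequence_length, hidden_size):
print(len(features), len(features[0]), len(features[0][0]))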
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Dict = '''char''' A : Any = '''bpe''' A : Dict = '''wp''' UpperCamelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[Any] = ['''image_processor''', '''char_tokenizer'''] A : int = '''ViTImageProcessor''' A : List[str] = '''MgpstrTokenizer''' def __init__( self, A=None, A=None, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.', A, ) SCREAMING_SNAKE_CASE : str = kwargs.pop('feature_extractor' ) SCREAMING_SNAKE_CASE : Optional[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained('gpt2' ) SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-uncased' ) super().__init__(A, A ) def __call__( self, A=None, A=None, A=None, **A ): '''simple docstring''' if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' ) if images is not None: SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor(A, return_tensors=A, **A ) if text is not None: SCREAMING_SNAKE_CASE : int = self.char_tokenizer(A, return_tensors=A, **A ) if text is None: return inputs elif images is None: return encodings else: SCREAMING_SNAKE_CASE : Any = encodings['input_ids'] return inputs def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = sequences SCREAMING_SNAKE_CASE : List[str] = char_preds.size(0 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self._decode_helper(A, 'char' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self._decode_helper(A, 'bpe' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._decode_helper(A, 'wp' ) SCREAMING_SNAKE_CASE : Optional[Any] = [] SCREAMING_SNAKE_CASE : Tuple = [] for i in range(A ): SCREAMING_SNAKE_CASE : str = [char_scores[i], bpe_scores[i], wp_scores[i]] SCREAMING_SNAKE_CASE : Dict = [char_strs[i], bpe_strs[i], wp_strs[i]] SCREAMING_SNAKE_CASE : List[str] = scores.index(max(A ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) SCREAMING_SNAKE_CASE : List[Any] = {} SCREAMING_SNAKE_CASE : int = final_strs SCREAMING_SNAKE_CASE : Any = final_scores SCREAMING_SNAKE_CASE : Dict = char_strs SCREAMING_SNAKE_CASE : Any = bpe_strs SCREAMING_SNAKE_CASE : Union[str, Any] = wp_strs return out def UpperCamelCase_ ( self, A, A ): '''simple docstring''' if format == DecodeType.CHARACTER: SCREAMING_SNAKE_CASE : List[Any] = self.char_decode SCREAMING_SNAKE_CASE : Optional[int] = 1 SCREAMING_SNAKE_CASE : str = '[s]' elif format == DecodeType.BPE: SCREAMING_SNAKE_CASE : str = self.bpe_decode SCREAMING_SNAKE_CASE : str = 2 SCREAMING_SNAKE_CASE : List[str] = '#' elif format == 
DecodeType.WORDPIECE: SCREAMING_SNAKE_CASE : Any = self.wp_decode SCREAMING_SNAKE_CASE : Tuple = 102 SCREAMING_SNAKE_CASE : List[Any] = '[SEP]' else: raise ValueError(F"Format {format} is not supported." ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = [], [] SCREAMING_SNAKE_CASE : Union[str, Any] = pred_logits.size(0 ) SCREAMING_SNAKE_CASE : Any = pred_logits.size(1 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = pred_logits.topk(1, dim=-1, largest=A, sorted=A ) SCREAMING_SNAKE_CASE : Optional[int] = preds_index.view(-1, A )[:, 1:] SCREAMING_SNAKE_CASE : List[Any] = decoder(A ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.softmax(A, dim=2 ).max(dim=2 ) SCREAMING_SNAKE_CASE : Dict = preds_max_prob[:, 1:] for index in range(A ): SCREAMING_SNAKE_CASE : Optional[int] = preds_str[index].find(A ) SCREAMING_SNAKE_CASE : List[Any] = preds_str[index][:pred_eos] SCREAMING_SNAKE_CASE : Dict = preds_index[index].cpu().tolist() SCREAMING_SNAKE_CASE : Union[str, Any] = pred_index.index(A ) if eos_token in pred_index else -1 SCREAMING_SNAKE_CASE : Optional[int] = preds_max_prob[index][: pred_eos_index + 1] SCREAMING_SNAKE_CASE : Optional[int] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(A ) conf_scores.append(A ) return dec_strs, conf_scores def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [seq.replace(' ', '' ) for seq in self.char_tokenizer.batch_decode(A )] return decode_strs def UpperCamelCase_ ( self, A ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [seq.replace(' ', '' ) for seq in self.wp_tokenizer.batch_decode(A )] return decode_strs
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
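# The `_LazyModule` indirection keeps `import transformers.models.bloom` cheap:
# submodules such as `modeling_bloom` are only imported when one of the names
# registered in `_import_structure` is first accessed. A rough illustration
# (assumes transformers and torch are installed):
import transformers.models.bloom as bloom  # fast: no torch-heavy code runs yet

config = bloom.BloomConfig()  # attribute access triggers the real submodule import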
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger("transformers.models.speecht5") def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: Any ): """simple docstring""" hf_model.apply_weight_norm() SCREAMING_SNAKE_CASE : Any = checkpoint['input_conv.weight_g'] SCREAMING_SNAKE_CASE : List[Any] = checkpoint['input_conv.weight_v'] SCREAMING_SNAKE_CASE : str = checkpoint['input_conv.bias'] for i in range(len(config.upsample_rates ) ): SCREAMING_SNAKE_CASE : Optional[int] = checkpoint[f"upsamples.{i}.1.weight_g"] SCREAMING_SNAKE_CASE : Dict = checkpoint[f"upsamples.{i}.1.weight_v"] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"upsamples.{i}.1.bias"] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): SCREAMING_SNAKE_CASE : int = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"] SCREAMING_SNAKE_CASE : str = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"] SCREAMING_SNAKE_CASE : Dict = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"] SCREAMING_SNAKE_CASE : Tuple = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"] SCREAMING_SNAKE_CASE : Optional[Any] = checkpoint['output_conv.1.weight_g'] SCREAMING_SNAKE_CASE : List[Any] = checkpoint['output_conv.1.weight_v'] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint['output_conv.1.bias'] hf_model.remove_weight_norm() @torch.no_grad() def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: int ,__UpperCamelCase: Any ,__UpperCamelCase: str=None ,__UpperCamelCase: Tuple=None ,): """simple docstring""" if config_path is not None: SCREAMING_SNAKE_CASE : List[Any] = SpeechTaHifiGanConfig.from_pretrained(__UpperCamelCase ) else: SCREAMING_SNAKE_CASE : Optional[int] = SpeechTaHifiGanConfig() SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaHifiGan(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.load(__UpperCamelCase ) load_weights(orig_checkpoint['model']['generator'] ,__UpperCamelCase ,__UpperCamelCase ) SCREAMING_SNAKE_CASE : int = np.load(__UpperCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = stats[0].reshape(-1 ) SCREAMING_SNAKE_CASE : Tuple = stats[1].reshape(-1 ) SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(__UpperCamelCase ).float() SCREAMING_SNAKE_CASE : Optional[Any] = torch.from_numpy(__UpperCamelCase ).float() model.save_pretrained(__UpperCamelCase ) if repo_id: print('Pushing to the hub...' ) model.push_to_hub(__UpperCamelCase ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." 
) UpperCamelCase_ = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
'''simple docstring''' import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params UpperCamelCase_ = getLogger(__name__) UpperCamelCase_ = "cuda" if torch.cuda.is_available() else "cpu" def lowercase__( __UpperCamelCase: List[str] ,__UpperCamelCase: str ,__UpperCamelCase: str ,__UpperCamelCase: int = 8 ,__UpperCamelCase: str = DEFAULT_DEVICE ,__UpperCamelCase: Optional[int]=False ,__UpperCamelCase: Tuple="summarization" ,__UpperCamelCase: Tuple=None ,**__UpperCamelCase: List[str] ,): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = Path(__UpperCamelCase ).open('w' ,encoding='utf-8' ) SCREAMING_SNAKE_CASE : int = str(__UpperCamelCase ) SCREAMING_SNAKE_CASE : int = AutoModelForSeqaSeqLM.from_pretrained(__UpperCamelCase ).to(__UpperCamelCase ) if fpaa: SCREAMING_SNAKE_CASE : Any = model.half() SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained(__UpperCamelCase ) logger.info(f"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. SCREAMING_SNAKE_CASE : Dict = time.time() # update config with task specific params use_task_specific_params(__UpperCamelCase ,__UpperCamelCase ) if prefix is None: SCREAMING_SNAKE_CASE : int = prefix or getattr(model.config ,'prefix' ,'' ) or '' for examples_chunk in tqdm(list(chunks(__UpperCamelCase ,__UpperCamelCase ) ) ): SCREAMING_SNAKE_CASE : Any = [prefix + text for text in examples_chunk] SCREAMING_SNAKE_CASE : Tuple = tokenizer(__UpperCamelCase ,return_tensors='pt' ,truncation=__UpperCamelCase ,padding='longest' ).to(__UpperCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = model.generate( input_ids=batch.input_ids ,attention_mask=batch.attention_mask ,**__UpperCamelCase ,) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.batch_decode(__UpperCamelCase ,skip_special_tokens=__UpperCamelCase ,clean_up_tokenization_spaces=__UpperCamelCase ) for hypothesis in dec: fout.write(hypothesis + '\n' ) fout.flush() fout.close() SCREAMING_SNAKE_CASE : Tuple = int(time.time() - start_time ) # seconds SCREAMING_SNAKE_CASE : Union[str, Any] = len(__UpperCamelCase ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs ,4 )} def lowercase__( ): """simple docstring""" return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' ) def lowercase__( __UpperCamelCase: List[str]=True ): """simple docstring""" SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() parser.add_argument('model_name' ,type=__UpperCamelCase ,help='like facebook/bart-large-cnn,t5-base, etc.' ) parser.add_argument('input_path' ,type=__UpperCamelCase ,help='like cnn_dm/test.source' ) parser.add_argument('save_path' ,type=__UpperCamelCase ,help='where to save summaries' ) parser.add_argument('--reference_path' ,type=__UpperCamelCase ,required=__UpperCamelCase ,help='like cnn_dm/test.target' ) parser.add_argument('--score_path' ,type=__UpperCamelCase ,required=__UpperCamelCase ,default='metrics.json' ,help='where to save metrics' ) parser.add_argument('--device' ,type=__UpperCamelCase ,required=__UpperCamelCase ,default=__UpperCamelCase ,help='cuda, cuda:1, cpu etc.' 
) parser.add_argument( '--prefix' ,type=__UpperCamelCase ,required=__UpperCamelCase ,default=__UpperCamelCase ,help='will be added to the begininng of src examples' ) parser.add_argument('--task' ,type=__UpperCamelCase ,default='summarization' ,help='used for task_specific_params + metrics' ) parser.add_argument('--bs' ,type=__UpperCamelCase ,default=8 ,required=__UpperCamelCase ,help='batch size' ) parser.add_argument( '--n_obs' ,type=__UpperCamelCase ,default=-1 ,required=__UpperCamelCase ,help='How many observations. Defaults to all.' ) parser.add_argument('--fp16' ,action='store_true' ) parser.add_argument('--dump-args' ,action='store_true' ,help='print the custom hparams with the results' ) parser.add_argument( '--info' ,nargs='?' ,type=__UpperCamelCase ,const=datetime_now() ,help=( 'use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.' ' lang=en-ru. If no value is passed, the current datetime string will be used.' ) ,) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = parser.parse_known_args() SCREAMING_SNAKE_CASE : int = parse_numeric_n_bool_cl_kwargs(__UpperCamelCase ) if parsed_args and verbose: print(f"parsed the following generate kwargs: {parsed_args}" ) SCREAMING_SNAKE_CASE : Optional[int] = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: SCREAMING_SNAKE_CASE : str = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=__UpperCamelCase ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c." ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError('Can\'t mix --fp16 and --device cpu' ) SCREAMING_SNAKE_CASE : int = generate_summaries_or_translations( __UpperCamelCase ,args.save_path ,args.model_name ,batch_size=args.bs ,device=args.device ,fpaa=args.fpaa ,task=args.task ,prefix=args.prefix ,**__UpperCamelCase ,) if args.reference_path is None: return {} # Compute scores SCREAMING_SNAKE_CASE : Tuple = calculate_bleu if 'translation' in args.task else calculate_rouge SCREAMING_SNAKE_CASE : int = [x.rstrip() for x in open(args.save_path ).readlines()] SCREAMING_SNAKE_CASE : Optional[Any] = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(__UpperCamelCase )] SCREAMING_SNAKE_CASE : dict = score_fn(__UpperCamelCase ,__UpperCamelCase ) scores.update(__UpperCamelCase ) if args.dump_args: scores.update(__UpperCamelCase ) if args.info: SCREAMING_SNAKE_CASE : Optional[int] = args.info if verbose: print(__UpperCamelCase ) if args.score_path is not None: json.dump(__UpperCamelCase ,open(args.score_path ,'w' ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
28
'''simple docstring''' from typing import Any class _a : '''simple docstring''' def __init__( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = data SCREAMING_SNAKE_CASE : Any = None def __repr__( self ): '''simple docstring''' return F"Node({self.data})" class _a : '''simple docstring''' def __init__( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = None def __iter__( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.head while node: yield node.data SCREAMING_SNAKE_CASE : List[str] = node.next def __len__( self ): '''simple docstring''' return sum(1 for _ in self ) def __repr__( self ): '''simple docstring''' return "->".join([str(A ) for item in self] ) def __getitem__( self, A ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self, A, A ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) SCREAMING_SNAKE_CASE : Optional[Any] = self.head for _ in range(A ): SCREAMING_SNAKE_CASE : Union[str, Any] = current.next SCREAMING_SNAKE_CASE : Any = data def UpperCamelCase_ ( self, A ): '''simple docstring''' self.insert_nth(len(self ), A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' self.insert_nth(0, A ) def UpperCamelCase_ ( self, A, A ): '''simple docstring''' if not 0 <= index <= len(self ): raise IndexError('list index out of range' ) SCREAMING_SNAKE_CASE : Union[str, Any] = Node(A ) if self.head is None: SCREAMING_SNAKE_CASE : Optional[int] = new_node elif index == 0: SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # link new_node to head SCREAMING_SNAKE_CASE : Tuple = new_node else: SCREAMING_SNAKE_CASE : Optional[int] = self.head for _ in range(index - 1 ): SCREAMING_SNAKE_CASE : str = temp.next SCREAMING_SNAKE_CASE : Union[str, Any] = temp.next SCREAMING_SNAKE_CASE : List[str] = new_node def UpperCamelCase_ ( self ): # print every node data '''simple docstring''' print(self ) def UpperCamelCase_ ( self ): '''simple docstring''' return self.delete_nth(0 ) def UpperCamelCase_ ( self ): # delete from tail '''simple docstring''' return self.delete_nth(len(self ) - 1 ) def UpperCamelCase_ ( self, A = 0 ): '''simple docstring''' if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('List index out of range.' ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # default first node if index == 0: SCREAMING_SNAKE_CASE : List[str] = self.head.next else: SCREAMING_SNAKE_CASE : Union[str, Any] = self.head for _ in range(index - 1 ): SCREAMING_SNAKE_CASE : Any = temp.next SCREAMING_SNAKE_CASE : List[str] = temp.next SCREAMING_SNAKE_CASE : Optional[int] = temp.next.next return delete_node.data def UpperCamelCase_ ( self ): '''simple docstring''' return self.head is None def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = None SCREAMING_SNAKE_CASE : Any = self.head while current: # Store the current node's next node. 
SCREAMING_SNAKE_CASE : Optional[int] = current.next # Make the current node's next point backwards SCREAMING_SNAKE_CASE : int = prev # Make the previous node be the current node SCREAMING_SNAKE_CASE : int = current # Make the current node the next node (to progress iteration) SCREAMING_SNAKE_CASE : List[Any] = next_node # Return prev in order to put the head at the end SCREAMING_SNAKE_CASE : List[Any] = prev def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = LinkedList() assert linked_list.is_empty() is True assert str(__UpperCamelCase ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(10 ): assert len(__UpperCamelCase ) == i linked_list.insert_nth(__UpperCamelCase ,i + 1 ) assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(0 ,12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(__UpperCamelCase ) == 9 assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 ,9 ) ) is True for i in range(0 ,9 ): SCREAMING_SNAKE_CASE : Any = -i assert all(linked_list[i] == -i for i in range(0 ,9 ) ) is True linked_list.reverse() assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(-8 ,1 ) ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [ -9, 1_00, Node(77_34_51_12 ), 'dlrow olleH', 7, 55_55, 0, -1_9_2.5_5_5_5_5, 'Hello, world!', 7_7.9, Node(10 ), None, None, 1_2.2_0, ] SCREAMING_SNAKE_CASE : Optional[int] = LinkedList() for i in test_input: linked_list.insert_tail(__UpperCamelCase ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(__UpperCamelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head SCREAMING_SNAKE_CASE : str = linked_list.delete_head() assert result == -9 assert ( str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail SCREAMING_SNAKE_CASE : Dict = linked_list.delete_tail() assert result == 1_2.2 assert ( str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list SCREAMING_SNAKE_CASE : str = linked_list.delete_nth(10 ) assert result is None assert ( str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('Hello again, world!' 
) ) assert ( str(__UpperCamelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(__UpperCamelCase ) assert ( str(__UpperCamelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(__UpperCamelCase ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def lowercase__( ): """simple docstring""" from doctest import testmod testmod() SCREAMING_SNAKE_CASE : Dict = LinkedList() linked_list.insert_head(input('Inserting 1st at head ' ).strip() ) linked_list.insert_head(input('Inserting 2nd at head ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() ) linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() print('\nDelete head' ) linked_list.delete_head() print('Delete tail' ) linked_list.delete_tail() print('\nPrint list:' ) linked_list.print_list() print('\nReverse linked list' ) linked_list.reverse() print('\nPrint list:' ) linked_list.print_list() print('\nString representation of linked list:' ) print(__UpperCamelCase ) print('\nReading/changing Node data using indexing:' ) print(f"Element at Position 1: {linked_list[1]}" ) SCREAMING_SNAKE_CASE : str = input('Enter New Value: ' ).strip() print('New list:' ) print(__UpperCamelCase ) print(f"length of linked_list is : {len(__UpperCamelCase )}" ) if __name__ == "__main__": main()
28
1
'''simple docstring'''
from .imports import is_rich_available


if is_rich_available():
    from rich.traceback import install

    install(show_locals=False)
else:
    raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
28
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class _a ( unittest.TestCase ): '''simple docstring''' def __init__( self, A, A=7, A=3, A=30, A=400, A=True, A=None, A=True, A=[0.5, 0.5, 0.5], A=[0.5, 0.5, 0.5], A=True, A=1 / 255, A=True, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333} SCREAMING_SNAKE_CASE : List[Any] = parent SCREAMING_SNAKE_CASE : Dict = batch_size SCREAMING_SNAKE_CASE : int = num_channels SCREAMING_SNAKE_CASE : Tuple = min_resolution SCREAMING_SNAKE_CASE : int = max_resolution SCREAMING_SNAKE_CASE : Tuple = do_resize SCREAMING_SNAKE_CASE : Tuple = size SCREAMING_SNAKE_CASE : Any = do_normalize SCREAMING_SNAKE_CASE : Optional[int] = image_mean SCREAMING_SNAKE_CASE : Union[str, Any] = image_std SCREAMING_SNAKE_CASE : Optional[int] = do_rescale SCREAMING_SNAKE_CASE : int = rescale_factor SCREAMING_SNAKE_CASE : List[str] = do_pad def UpperCamelCase_ ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCamelCase_ ( self, A, A=False ): '''simple docstring''' if not batched: SCREAMING_SNAKE_CASE : List[Any] = image_inputs[0] if isinstance(A, Image.Image ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = image.size else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = image.shape[1], image.shape[2] if w < h: SCREAMING_SNAKE_CASE : int = int(self.size['shortest_edge'] * h / w ) SCREAMING_SNAKE_CASE : int = self.size['shortest_edge'] elif w > h: SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge'] SCREAMING_SNAKE_CASE : Dict = int(self.size['shortest_edge'] * w / h ) else: SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge'] SCREAMING_SNAKE_CASE : int = self.size['shortest_edge'] else: SCREAMING_SNAKE_CASE : Union[str, Any] = [] for image in image_inputs: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) SCREAMING_SNAKE_CASE : Union[str, Any] = max(A, key=lambda A : item[0] )[0] SCREAMING_SNAKE_CASE : str = max(A, key=lambda A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : List[Any] = YolosImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = YolosImageProcessingTester(self ) @property def UpperCamelCase_ ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A, 'image_mean' ) ) self.assertTrue(hasattr(A, 'image_std' ) ) self.assertTrue(hasattr(A, 'do_normalize' ) ) self.assertTrue(hasattr(A, 'do_resize' ) ) 
self.assertTrue(hasattr(A, 'size' ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1_333} ) self.assertEqual(image_processor.do_pad, A ) SCREAMING_SNAKE_CASE : str = self.image_processing_class.from_dict( self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=A ) self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad, A ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A, Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.get_expected_values(A, batched=A ) SCREAMING_SNAKE_CASE : Tuple = image_processing(A, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, numpify=A ) for image in image_inputs: self.assertIsInstance(A, np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(A, return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor_tester.get_expected_values(A, batched=A ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A ) for image in image_inputs: self.assertIsInstance(A, torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, 
expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = image_processing(A, return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(A, batched=A ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(do_resize=A, do_normalize=A, do_rescale=A ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A ) for image in image_inputs: self.assertIsInstance(A, torch.Tensor ) # Test whether the method "pad" and calling the image processor return the same tensors SCREAMING_SNAKE_CASE : List[str] = image_processing_a.pad(A, return_tensors='pt' ) SCREAMING_SNAKE_CASE : Dict = image_processing_a(A, return_tensors='pt' ) self.assertTrue( torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r' ) as f: SCREAMING_SNAKE_CASE : Dict = json.loads(f.read() ) SCREAMING_SNAKE_CASE : Any = {'image_id': 39_769, 'annotations': target} # encode them SCREAMING_SNAKE_CASE : Any = YolosImageProcessor.from_pretrained('hustvl/yolos-small' ) SCREAMING_SNAKE_CASE : int = image_processing(images=A, annotations=A, return_tensors='pt' ) # verify pixel values SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['pixel_values'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) ) # verify area SCREAMING_SNAKE_CASE : Tuple = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) ) # verify boxes SCREAMING_SNAKE_CASE : str = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) ) # verify image_id SCREAMING_SNAKE_CASE : Tuple = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) ) # verify is_crowd SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) ) # verify class_labels SCREAMING_SNAKE_CASE : int = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) ) # verify orig_size SCREAMING_SNAKE_CASE : Tuple = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) ) # verify size SCREAMING_SNAKE_CASE : str = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r' ) as f: SCREAMING_SNAKE_CASE : int = json.loads(f.read() ) SCREAMING_SNAKE_CASE : List[Any] = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target} SCREAMING_SNAKE_CASE : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them SCREAMING_SNAKE_CASE : int = YolosImageProcessor(format='coco_panoptic' ) SCREAMING_SNAKE_CASE : str = image_processing(images=A, annotations=A, masks_path=A, return_tensors='pt' ) # verify pixel values SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['pixel_values'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) ) # verify area SCREAMING_SNAKE_CASE : Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) ) # verify boxes SCREAMING_SNAKE_CASE : Optional[int] = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape, A ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) ) # verify image_id SCREAMING_SNAKE_CASE : List[str] = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) ) # verify is_crowd SCREAMING_SNAKE_CASE : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) ) # verify class_labels SCREAMING_SNAKE_CASE : Any = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) ) # verify masks SCREAMING_SNAKE_CASE : Optional[int] = 822_873 self.assertEqual(encoding['labels'][0]['masks'].sum().item(), A ) # verify orig_size SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) ) # verify size SCREAMING_SNAKE_CASE : Tuple = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
28
1
'''simple docstring''' import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A, 'hidden_sizes' ) ) self.parent.assertTrue(hasattr(A, 'neck_hidden_sizes' ) ) self.parent.assertTrue(hasattr(A, 'num_attention_heads' ) ) class _a : '''simple docstring''' def __init__( self, A, A=13, A=32, A=2, A=3, A=640, A=4, A="silu", A=3, A=32, A=0.1, A=0.1, A=0.1, A=0.02, A=True, A=True, A=10, A=None, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : int = image_size SCREAMING_SNAKE_CASE : str = patch_size SCREAMING_SNAKE_CASE : Tuple = num_channels SCREAMING_SNAKE_CASE : int = last_hidden_size SCREAMING_SNAKE_CASE : Any = num_attention_heads SCREAMING_SNAKE_CASE : List[Any] = hidden_act SCREAMING_SNAKE_CASE : Optional[int] = conv_kernel_size SCREAMING_SNAKE_CASE : Optional[Any] = output_stride SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = classifier_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = use_labels SCREAMING_SNAKE_CASE : int = is_training SCREAMING_SNAKE_CASE : Dict = num_labels SCREAMING_SNAKE_CASE : Dict = initializer_range SCREAMING_SNAKE_CASE : Optional[int] = scope def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Dict = None if self.use_labels: SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size], self.num_labels ) SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) SCREAMING_SNAKE_CASE : int = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self ): '''simple docstring''' return MobileViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = MobileViTModel(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Optional[int] = model(A ) self.parent.assertEqual( result.last_hidden_state.shape, ( 
self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.num_labels SCREAMING_SNAKE_CASE : Tuple = MobileViTForImageClassification(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : List[str] = model(A, labels=A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels SCREAMING_SNAKE_CASE : str = MobileViTForSemanticSegmentation(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : str = model(A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) SCREAMING_SNAKE_CASE : int = model(A, labels=A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs SCREAMING_SNAKE_CASE : str = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Tuple = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) A : List[Any] = ( { '''feature-extraction''': MobileViTModel, '''image-classification''': MobileViTForImageClassification, '''image-segmentation''': MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) A : Optional[int] = False A : Dict = False A : List[Any] = False A : Optional[int] = False def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = MobileViTModelTester(self ) SCREAMING_SNAKE_CASE : str = MobileViTConfigTester(self, config_class=A, has_text_modality=A ) def UpperCamelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='MobileViT does not use inputs_embeds' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViT does not support input and output embeddings' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViT does not output attentions' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A ) SCREAMING_SNAKE_CASE : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE : Any = ['pixel_values'] self.assertListEqual(arg_names[:1], A ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' def check_hidden_states_output(A, A, A ): SCREAMING_SNAKE_CASE : Any = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(A, A ) ) SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states SCREAMING_SNAKE_CASE : List[str] = 5 self.assertEqual(len(A ), A ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. SCREAMING_SNAKE_CASE : int = 2 for i in range(len(A ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], ) divisor *= 2 self.assertEqual(self.model_tester.output_stride, divisor // 2 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Tuple = True check_hidden_states_output(A, A, A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE : Optional[Any] = True check_hidden_states_output(A, A, A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*A ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : int = MobileViTModel.from_pretrained(A ) self.assertIsNotNone(A ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class _a ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): '''simple docstring''' return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(A ) SCREAMING_SNAKE_CASE : Any = self.default_image_processor SCREAMING_SNAKE_CASE : Dict = prepare_img() SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Tuple = model(**A ) # verify the logits SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape, A ) SCREAMING_SNAKE_CASE : int = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(A ) self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : Optional[Any] = model.to(A ) SCREAMING_SNAKE_CASE : Optional[int] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) 
SCREAMING_SNAKE_CASE : str = prepare_img() SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Dict = model(**A ) SCREAMING_SNAKE_CASE : List[str] = outputs.logits # verify the logits SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape, A ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor( [ [[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]], [[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]], [[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]], ], device=A, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], A, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : List[str] = model.to(A ) SCREAMING_SNAKE_CASE : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img() SCREAMING_SNAKE_CASE : Any = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Optional[Any] = model(**A ) SCREAMING_SNAKE_CASE : int = outputs.logits.detach().cpu() SCREAMING_SNAKE_CASE : Dict = image_processor.post_process_semantic_segmentation(outputs=A, target_sizes=[(50, 60)] ) SCREAMING_SNAKE_CASE : Dict = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape, A ) SCREAMING_SNAKE_CASE : Tuple = image_processor.post_process_semantic_segmentation(outputs=A ) SCREAMING_SNAKE_CASE : Any = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape, A )
28
'''simple docstring'''
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal


UpperCamelCase_ = logging.get_logger(__name__)

UpperCamelCase_ = TypeVar("DatasetType", Dataset, IterableDataset)


def lowercase__(
    __UpperCamelCase: List[DatasetType] ,
    __UpperCamelCase: Optional[List[float]] = None ,
    __UpperCamelCase: Optional[int] = None ,
    __UpperCamelCase: Optional[DatasetInfo] = None ,
    __UpperCamelCase: Optional[NamedSplit] = None ,
    __UpperCamelCase: Literal["first_exhausted", "all_exhausted"] = "first_exhausted" ,
):
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.' )
    for i, dataset in enumerate(__UpperCamelCase ):
        if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ):
            if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        'is an empty dataset dictionary.'
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}."
            )
        if i == 0:
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = (
                (Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset)
            )
        elif not isinstance(__UpperCamelCase ,__UpperCamelCase ):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase )
    else:
        return _interleave_iterable_datasets(
            __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase )


def lowercase__(
    __UpperCamelCase: List[DatasetType] ,
    __UpperCamelCase: Optional[DatasetInfo] = None ,
    __UpperCamelCase: Optional[NamedSplit] = None ,
    __UpperCamelCase: int = 0 ,
):
    """simple docstring"""
    if not dsets:
        raise ValueError('Unable to concatenate an empty list of datasets.' )
    for i, dataset in enumerate(__UpperCamelCase ):
        if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ):
            if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        'is an empty dataset dictionary.'
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}."
            )
        if i == 0:
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = (
                (Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset)
            )
        elif not isinstance(__UpperCamelCase ,__UpperCamelCase ):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase )
    else:
        return _concatenate_iterable_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase )
28
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase_ = { "vocab_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt", "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-german-cased": ( "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json" ), "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json" ), }, } UpperCamelCase_ = { "distilbert-base-uncased": 5_1_2, "distilbert-base-uncased-distilled-squad": 5_1_2, "distilbert-base-cased": 5_1_2, "distilbert-base-cased-distilled-squad": 5_1_2, "distilbert-base-german-cased": 5_1_2, "distilbert-base-multilingual-cased": 5_1_2, } UpperCamelCase_ = { "distilbert-base-uncased": {"do_lower_case": True}, "distilbert-base-uncased-distilled-squad": {"do_lower_case": True}, "distilbert-base-cased": {"do_lower_case": False}, "distilbert-base-cased-distilled-squad": {"do_lower_case": False}, "distilbert-base-german-cased": {"do_lower_case": False}, "distilbert-base-multilingual-cased": {"do_lower_case": False}, } class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[Any] = VOCAB_FILES_NAMES A : Dict = PRETRAINED_VOCAB_FILES_MAP A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : Optional[Any] = PRETRAINED_INIT_CONFIGURATION A : Optional[int] = ['''input_ids''', '''attention_mask'''] A : List[Any] = DistilBertTokenizer def __init__( self, A=None, A=None, A=True, A="[UNK]", A="[SEP]", A="[PAD]", A="[CLS]", A="[MASK]", A=True, A=None, **A, ): '''simple docstring''' super().__init__( A, tokenizer_file=A, do_lower_case=A, unk_token=A, sep_token=A, pad_token=A, cls_token=A, mask_token=A, tokenize_chinese_chars=A, strip_accents=A, **A, ) SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase', A ) != do_lower_case or normalizer_state.get('strip_accents', A ) != strip_accents or normalizer_state.get('handle_chinese_chars', A ) != tokenize_chinese_chars ): SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(A, 
normalizer_state.pop('type' ) ) SCREAMING_SNAKE_CASE : Optional[Any] = do_lower_case SCREAMING_SNAKE_CASE : List[str] = strip_accents SCREAMING_SNAKE_CASE : List[str] = tokenize_chinese_chars SCREAMING_SNAKE_CASE : Dict = normalizer_class(**A ) SCREAMING_SNAKE_CASE : Union[str, Any] = do_lower_case def UpperCamelCase_ ( self, A, A=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id] SCREAMING_SNAKE_CASE : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self._tokenizer.model.save(A, name=A ) return tuple(A )
28
'''simple docstring''' import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A, 'hidden_sizes' ) ) self.parent.assertTrue(hasattr(A, 'neck_hidden_sizes' ) ) self.parent.assertTrue(hasattr(A, 'num_attention_heads' ) ) class _a : '''simple docstring''' def __init__( self, A, A=13, A=32, A=2, A=3, A=640, A=4, A="silu", A=3, A=32, A=0.1, A=0.1, A=0.1, A=0.02, A=True, A=True, A=10, A=None, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : int = image_size SCREAMING_SNAKE_CASE : str = patch_size SCREAMING_SNAKE_CASE : Tuple = num_channels SCREAMING_SNAKE_CASE : int = last_hidden_size SCREAMING_SNAKE_CASE : Any = num_attention_heads SCREAMING_SNAKE_CASE : List[Any] = hidden_act SCREAMING_SNAKE_CASE : Optional[int] = conv_kernel_size SCREAMING_SNAKE_CASE : Optional[Any] = output_stride SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = classifier_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = use_labels SCREAMING_SNAKE_CASE : int = is_training SCREAMING_SNAKE_CASE : Dict = num_labels SCREAMING_SNAKE_CASE : Dict = initializer_range SCREAMING_SNAKE_CASE : Optional[int] = scope def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Dict = None if self.use_labels: SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size], self.num_labels ) SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) SCREAMING_SNAKE_CASE : int = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self ): '''simple docstring''' return MobileViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = MobileViTModel(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Optional[int] = model(A ) self.parent.assertEqual( result.last_hidden_state.shape, ( 
self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.num_labels SCREAMING_SNAKE_CASE : Tuple = MobileViTForImageClassification(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : List[str] = model(A, labels=A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels SCREAMING_SNAKE_CASE : str = MobileViTForSemanticSegmentation(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : str = model(A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) SCREAMING_SNAKE_CASE : int = model(A, labels=A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs SCREAMING_SNAKE_CASE : str = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Tuple = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) A : List[Any] = ( { '''feature-extraction''': MobileViTModel, '''image-classification''': MobileViTForImageClassification, '''image-segmentation''': MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) A : Optional[int] = False A : Dict = False A : List[Any] = False A : Optional[int] = False def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = MobileViTModelTester(self ) SCREAMING_SNAKE_CASE : str = MobileViTConfigTester(self, config_class=A, has_text_modality=A ) def UpperCamelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='MobileViT does not use inputs_embeds' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViT does not support input and output embeddings' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViT does not output attentions' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A ) SCREAMING_SNAKE_CASE : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE : Any = ['pixel_values'] self.assertListEqual(arg_names[:1], A ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' def check_hidden_states_output(A, A, A ): SCREAMING_SNAKE_CASE : Any = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(A, A ) ) SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states SCREAMING_SNAKE_CASE : List[str] = 5 self.assertEqual(len(A ), A ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. SCREAMING_SNAKE_CASE : int = 2 for i in range(len(A ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], ) divisor *= 2 self.assertEqual(self.model_tester.output_stride, divisor // 2 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Tuple = True check_hidden_states_output(A, A, A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE : Optional[Any] = True check_hidden_states_output(A, A, A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*A ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : int = MobileViTModel.from_pretrained(A ) self.assertIsNotNone(A ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class _a ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): '''simple docstring''' return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(A ) SCREAMING_SNAKE_CASE : Any = self.default_image_processor SCREAMING_SNAKE_CASE : Dict = prepare_img() SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Tuple = model(**A ) # verify the logits SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape, A ) SCREAMING_SNAKE_CASE : int = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(A ) self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : Optional[Any] = model.to(A ) SCREAMING_SNAKE_CASE : Optional[int] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) 
SCREAMING_SNAKE_CASE : str = prepare_img() SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Dict = model(**A ) SCREAMING_SNAKE_CASE : List[str] = outputs.logits # verify the logits SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape, A ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor( [ [[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]], [[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]], [[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]], ], device=A, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], A, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : List[str] = model.to(A ) SCREAMING_SNAKE_CASE : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img() SCREAMING_SNAKE_CASE : Any = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Optional[Any] = model(**A ) SCREAMING_SNAKE_CASE : int = outputs.logits.detach().cpu() SCREAMING_SNAKE_CASE : Dict = image_processor.post_process_semantic_segmentation(outputs=A, target_sizes=[(50, 60)] ) SCREAMING_SNAKE_CASE : Dict = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape, A ) SCREAMING_SNAKE_CASE : Tuple = image_processor.post_process_semantic_segmentation(outputs=A ) SCREAMING_SNAKE_CASE : Any = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape, A )
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    # build an undirected adjacency list: every edge is inserted in both directions
    adjacency = defaultdict(list)
    for node_a, node_b, cost in edges:
        adjacency[node_a].append([node_b, cost])
        adjacency[node_b].append([node_a, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase_ = { "vocab_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt", "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-german-cased": ( "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json" ), "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json" ), }, } UpperCamelCase_ = { "distilbert-base-uncased": 5_1_2, "distilbert-base-uncased-distilled-squad": 5_1_2, "distilbert-base-cased": 5_1_2, "distilbert-base-cased-distilled-squad": 5_1_2, "distilbert-base-german-cased": 5_1_2, "distilbert-base-multilingual-cased": 5_1_2, } UpperCamelCase_ = { "distilbert-base-uncased": {"do_lower_case": True}, "distilbert-base-uncased-distilled-squad": {"do_lower_case": True}, "distilbert-base-cased": {"do_lower_case": False}, "distilbert-base-cased-distilled-squad": {"do_lower_case": False}, "distilbert-base-german-cased": {"do_lower_case": False}, "distilbert-base-multilingual-cased": {"do_lower_case": False}, } class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[Any] = VOCAB_FILES_NAMES A : Dict = PRETRAINED_VOCAB_FILES_MAP A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : Optional[Any] = PRETRAINED_INIT_CONFIGURATION A : Optional[int] = ['''input_ids''', '''attention_mask'''] A : List[Any] = DistilBertTokenizer def __init__( self, A=None, A=None, A=True, A="[UNK]", A="[SEP]", A="[PAD]", A="[CLS]", A="[MASK]", A=True, A=None, **A, ): '''simple docstring''' super().__init__( A, tokenizer_file=A, do_lower_case=A, unk_token=A, sep_token=A, pad_token=A, cls_token=A, mask_token=A, tokenize_chinese_chars=A, strip_accents=A, **A, ) SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase', A ) != do_lower_case or normalizer_state.get('strip_accents', A ) != strip_accents or normalizer_state.get('handle_chinese_chars', A ) != tokenize_chinese_chars ): SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(A, 
normalizer_state.pop('type' ) ) SCREAMING_SNAKE_CASE : Optional[Any] = do_lower_case SCREAMING_SNAKE_CASE : List[str] = strip_accents SCREAMING_SNAKE_CASE : List[str] = tokenize_chinese_chars SCREAMING_SNAKE_CASE : Dict = normalizer_class(**A ) SCREAMING_SNAKE_CASE : Union[str, Any] = do_lower_case def UpperCamelCase_ ( self, A, A=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id] SCREAMING_SNAKE_CASE : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self._tokenizer.model.save(A, name=A ) return tuple(A )
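# The segment-id logic above follows the usual BERT convention: the first sequence
# (with [CLS] and its [SEP]) gets token type 0, the second sequence and its [SEP] get
# type 1. A library-free mirror of that rule; the token ids below are made up.
def segment_ids(ids_a, ids_b=None, cls_id=101, sep_id=102):
    first = [cls_id] + ids_a + [sep_id]
    if ids_b is None:
        return [0] * len(first)
    return [0] * len(first) + [1] * (len(ids_b) + 1)


print(segment_ids([7, 8]))       # [0, 0, 0, 0]
print(segment_ids([7, 8], [9]))  # [0, 0, 0, 0, 1, 1]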
'''simple docstring''' import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger(__name__) def lowercase__( __UpperCamelCase: Any ): """simple docstring""" print('Loading config file...' ) def flatten_yaml_as_dict(__UpperCamelCase: List[Any] ,__UpperCamelCase: Optional[Any]="" ,__UpperCamelCase: List[str]="." ): SCREAMING_SNAKE_CASE : List[Any] = [] for k, v in d.items(): SCREAMING_SNAKE_CASE : List[Any] = parent_key + sep + k if parent_key else k if isinstance(__UpperCamelCase ,collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(__UpperCamelCase ,__UpperCamelCase ,sep=__UpperCamelCase ).items() ) else: items.append((new_key, v) ) return dict(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Tuple = argparse.Namespace() with open(__UpperCamelCase ,'r' ) as yaml_file: try: SCREAMING_SNAKE_CASE : Dict = yaml.load(__UpperCamelCase ,Loader=yaml.FullLoader ) SCREAMING_SNAKE_CASE : Any = flatten_yaml_as_dict(__UpperCamelCase ) for k, v in flat_cfg.items(): setattr(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) except yaml.YAMLError as exc: logger.error('Error while loading config file: {}. Error message: {}'.format(__UpperCamelCase ,str(__UpperCamelCase ) ) ) return config def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = MobileViTVaConfig() SCREAMING_SNAKE_CASE : Any = False # dataset if task_name.startswith('imagenet1k_' ): SCREAMING_SNAKE_CASE : Dict = 10_00 if int(task_name.strip().split('_' )[-1] ) == 3_84: SCREAMING_SNAKE_CASE : Optional[Any] = 3_84 else: SCREAMING_SNAKE_CASE : List[Any] = 2_56 SCREAMING_SNAKE_CASE : Optional[Any] = 'imagenet-1k-id2label.json' elif task_name.startswith('imagenet21k_to_1k_' ): SCREAMING_SNAKE_CASE : Optional[int] = 2_10_00 if int(task_name.strip().split('_' )[-1] ) == 3_84: SCREAMING_SNAKE_CASE : List[Any] = 3_84 else: SCREAMING_SNAKE_CASE : Optional[Any] = 2_56 SCREAMING_SNAKE_CASE : Dict = 'imagenet-22k-id2label.json' elif task_name.startswith('ade20k_' ): SCREAMING_SNAKE_CASE : Optional[Any] = 1_51 SCREAMING_SNAKE_CASE : Optional[int] = 5_12 SCREAMING_SNAKE_CASE : Dict = 'ade20k-id2label.json' SCREAMING_SNAKE_CASE : Union[str, Any] = True elif task_name.startswith('voc_' ): SCREAMING_SNAKE_CASE : Tuple = 21 SCREAMING_SNAKE_CASE : List[Any] = 5_12 SCREAMING_SNAKE_CASE : Optional[Any] = 'pascal-voc-id2label.json' SCREAMING_SNAKE_CASE : Dict = True # orig_config SCREAMING_SNAKE_CASE : Optional[Any] = load_orig_config_file(__UpperCamelCase ) assert getattr(__UpperCamelCase ,'model.classification.name' ,-1 ) == "mobilevit_v2", "Invalid model" SCREAMING_SNAKE_CASE : Tuple = getattr(__UpperCamelCase ,'model.classification.mitv2.width_multiplier' ,1.0 ) assert ( getattr(__UpperCamelCase ,'model.classification.mitv2.attn_norm_layer' ,-1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" SCREAMING_SNAKE_CASE : int = getattr(__UpperCamelCase ,'model.classification.activation.name' ,'swish' ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(__UpperCamelCase 
,'model.segmentation.output_stride' ,16 ) if "_deeplabv3" in task_name: SCREAMING_SNAKE_CASE : str = getattr(__UpperCamelCase ,'model.segmentation.deeplabv3.aspp_rates' ,[12, 24, 36] ) SCREAMING_SNAKE_CASE : Tuple = getattr(__UpperCamelCase ,'model.segmentation.deeplabv3.aspp_out_channels' ,5_12 ) SCREAMING_SNAKE_CASE : int = getattr(__UpperCamelCase ,'model.segmentation.deeplabv3.aspp_dropout' ,0.1 ) # id2label SCREAMING_SNAKE_CASE : List[str] = 'huggingface/label-files' SCREAMING_SNAKE_CASE : Dict = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type='dataset' ) ,'r' ) ) SCREAMING_SNAKE_CASE : List[Any] = {int(__UpperCamelCase ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Union[str, Any] = idalabel SCREAMING_SNAKE_CASE : Optional[Any] = {v: k for k, v in idalabel.items()} return config def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = dct.pop(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Any = val def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: List[str]=False ): """simple docstring""" if base_model: SCREAMING_SNAKE_CASE : Union[str, Any] = '' else: SCREAMING_SNAKE_CASE : List[str] = 'mobilevitv2.' SCREAMING_SNAKE_CASE : Optional[Any] = [] for k in state_dict.keys(): if k[:8] == "encoder.": SCREAMING_SNAKE_CASE : int = k[8:] else: SCREAMING_SNAKE_CASE : Optional[int] = k if ".block." in k: SCREAMING_SNAKE_CASE : str = k_new.replace('.block.' ,'.' ) if ".conv." in k: SCREAMING_SNAKE_CASE : Optional[Any] = k_new.replace('.conv.' ,'.convolution.' ) if ".norm." in k: SCREAMING_SNAKE_CASE : List[Any] = k_new.replace('.norm.' ,'.normalization.' ) if "conv_1." in k: SCREAMING_SNAKE_CASE : Any = k_new.replace('conv_1.' ,f"{model_prefix}conv_stem." ) for i in [1, 2]: if f"layer_{i}." in k: SCREAMING_SNAKE_CASE : List[str] = k_new.replace(f"layer_{i}." ,f"{model_prefix}encoder.layer.{i-1}.layer." ) if ".exp_1x1." in k: SCREAMING_SNAKE_CASE : List[str] = k_new.replace('.exp_1x1.' ,'.expand_1x1.' ) if ".red_1x1." in k: SCREAMING_SNAKE_CASE : Tuple = k_new.replace('.red_1x1.' ,'.reduce_1x1.' ) for i in [3, 4, 5]: if f"layer_{i}.0." in k: SCREAMING_SNAKE_CASE : List[str] = k_new.replace(f"layer_{i}.0." ,f"{model_prefix}encoder.layer.{i-1}.downsampling_layer." ) if f"layer_{i}.1.local_rep.0." in k: SCREAMING_SNAKE_CASE : List[str] = k_new.replace(f"layer_{i}.1.local_rep.0." ,f"{model_prefix}encoder.layer.{i-1}.conv_kxk." ) if f"layer_{i}.1.local_rep.1." in k: SCREAMING_SNAKE_CASE : Dict = k_new.replace(f"layer_{i}.1.local_rep.1." ,f"{model_prefix}encoder.layer.{i-1}.conv_1x1." ) for i in [3, 4, 5]: if i == 3: SCREAMING_SNAKE_CASE : int = [0, 1] elif i == 4: SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 1, 2, 3] elif i == 5: SCREAMING_SNAKE_CASE : List[str] = [0, 1, 2] for j in j_in: if f"layer_{i}.1.global_rep.{j}." in k: SCREAMING_SNAKE_CASE : Tuple = k_new.replace( f"layer_{i}.1.global_rep.{j}." ,f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." ) if f"layer_{i}.1.global_rep.{j+1}." in k: SCREAMING_SNAKE_CASE : List[Any] = k_new.replace( f"layer_{i}.1.global_rep.{j+1}." ,f"{model_prefix}encoder.layer.{i-1}.layernorm." ) if f"layer_{i}.1.conv_proj." in k: SCREAMING_SNAKE_CASE : List[str] = k_new.replace(f"layer_{i}.1.conv_proj." ,f"{model_prefix}encoder.layer.{i-1}.conv_projection." ) if "pre_norm_attn.0." in k: SCREAMING_SNAKE_CASE : List[Any] = k_new.replace('pre_norm_attn.0.' ,'layernorm_before.' ) if "pre_norm_attn.1." 
in k: SCREAMING_SNAKE_CASE : List[Any] = k_new.replace('pre_norm_attn.1.' ,'attention.' ) if "pre_norm_ffn.0." in k: SCREAMING_SNAKE_CASE : Optional[Any] = k_new.replace('pre_norm_ffn.0.' ,'layernorm_after.' ) if "pre_norm_ffn.1." in k: SCREAMING_SNAKE_CASE : Optional[Any] = k_new.replace('pre_norm_ffn.1.' ,'ffn.conv1.' ) if "pre_norm_ffn.3." in k: SCREAMING_SNAKE_CASE : int = k_new.replace('pre_norm_ffn.3.' ,'ffn.conv2.' ) if "classifier.1." in k: SCREAMING_SNAKE_CASE : int = k_new.replace('classifier.1.' ,'classifier.' ) if "seg_head." in k: SCREAMING_SNAKE_CASE : Optional[Any] = k_new.replace('seg_head.' ,'segmentation_head.' ) if ".aspp_layer." in k: SCREAMING_SNAKE_CASE : Any = k_new.replace('.aspp_layer.' ,'.' ) if ".aspp_pool." in k: SCREAMING_SNAKE_CASE : List[Any] = k_new.replace('.aspp_pool.' ,'.' ) rename_keys.append((k, k_new) ) return rename_keys def lowercase__( __UpperCamelCase: Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [] for k in state_dict.keys(): if k.startswith('seg_head.aux_head.' ): keys_to_ignore.append(__UpperCamelCase ) for k in keys_to_ignore: state_dict.pop(__UpperCamelCase ,__UpperCamelCase ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg' # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" SCREAMING_SNAKE_CASE : List[Any] = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: Optional[Any] ,__UpperCamelCase: Optional[Any] ,__UpperCamelCase: List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = get_mobilevitva_config(__UpperCamelCase ,__UpperCamelCase ) # load original state_dict SCREAMING_SNAKE_CASE : Dict = torch.load(__UpperCamelCase ,map_location='cpu' ) # load huggingface model if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ): SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTVaForSemanticSegmentation(__UpperCamelCase ).eval() SCREAMING_SNAKE_CASE : List[str] = False else: SCREAMING_SNAKE_CASE : Optional[Any] = MobileViTVaForImageClassification(__UpperCamelCase ).eval() SCREAMING_SNAKE_CASE : Optional[Any] = False # remove and rename some keys of load the original model SCREAMING_SNAKE_CASE : Dict = checkpoint remove_unused_keys(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Tuple = create_rename_keys(__UpperCamelCase ,base_model=__UpperCamelCase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # load modified state_dict model.load_state_dict(__UpperCamelCase ) # Check outputs on an image, prepared by MobileViTImageProcessor SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTImageProcessor(crop_size=config.image_size ,size=config.image_size + 32 ) SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(images=prepare_img() ,return_tensors='pt' ) SCREAMING_SNAKE_CASE : Tuple = model(**__UpperCamelCase ) # verify classification model if task_name.startswith('imagenet' ): SCREAMING_SNAKE_CASE : Any = outputs.logits SCREAMING_SNAKE_CASE : List[Any] = logits.argmax(-1 ).item() print('Predicted class:' ,model.config.idalabel[predicted_class_idx] ) if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0: # expected_logits for base variant SCREAMING_SNAKE_CASE : Any = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ) assert torch.allclose(logits[0, :3] ,__UpperCamelCase 
,atol=1e-4 ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f"Saving model {task_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(__UpperCamelCase ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . " "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n " ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) UpperCamelCase_ = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
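# The conversion above is driven by a pop-and-reinsert rename helper plus a list of
# (old, new) key pairs; the same pattern in isolation, on a toy state dict.
import torch


def rename_key(state_dict, old, new):
    state_dict[new] = state_dict.pop(old)


state_dict = {"conv_1.weight": torch.zeros(3), "seg_head.bias": torch.ones(2)}
rename_keys = [
    ("conv_1.weight", "mobilevitv2.conv_stem.weight"),
    ("seg_head.bias", "segmentation_head.bias"),
]
for src, dest in rename_keys:
    rename_key(state_dict, src, dest)

print(sorted(state_dict))  # ['mobilevitv2.conv_stem.weight', 'segmentation_head.bias']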
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 UpperCamelCase_ = get_tests_dir("fixtures") class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = mock.Mock() SCREAMING_SNAKE_CASE : List[Any] = 500 SCREAMING_SNAKE_CASE : Optional[Any] = {} SCREAMING_SNAKE_CASE : Any = HTTPError SCREAMING_SNAKE_CASE : Any = {} # Download this model to make sure it's in the cache. SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request', return_value=A ) as mock_head: SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' ) # This check we did call the fake head request mock_head.assert_called() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = ViTImageProcessor.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' ) def UpperCamelCase_ ( self ): '''simple docstring''' with self.assertRaises(A ): # config is in subfolder, the following should not work without specifying the subfolder SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants' ) SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained( 'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor' ) self.assertIsNotNone(A ) @is_staging_test class _a ( unittest.TestCase ): '''simple docstring''' @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = TOKEN HfFolder.save_token(A ) @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' try: delete_repo(token=cls._token, repo_id='test-image-processor' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='test-dynamic-image-processor' ) except HTTPError: pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub('test-image-processor', use_auth_token=self._token ) SCREAMING_SNAKE_CASE : int = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) # Reset repo delete_repo(token=self._token, repo_id='test-image-processor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A, repo_id='test-image-processor', push_to_hub=A, use_auth_token=self._token ) SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 
ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token ) SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('valid_org/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) # Reset repo delete_repo(token=self._token, repo_id='valid_org/test-image-processor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A, repo_id='valid_org/test-image-processor-org', push_to_hub=A, use_auth_token=self._token ) SCREAMING_SNAKE_CASE : Dict = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) def UpperCamelCase_ ( self ): '''simple docstring''' CustomImageProcessor.register_for_auto_class() SCREAMING_SNAKE_CASE : Tuple = CustomImageProcessor.from_pretrained(A ) image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map, {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'}, ) SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained( F"{USER}/test-dynamic-image-processor", trust_remote_code=A ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor' )
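# The offline-error test at the top of this file hinges on patching
# `requests.Session.request`; the same mocking pattern reduced to its core.
import unittest.mock as mock

import requests
from requests.exceptions import HTTPError

response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = HTTPError
response_mock.json.return_value = {}

with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
    # anything built on requests now sees a 500 instead of hitting the network
    response = requests.get("https://example.com")
    print(response.status_code)  # 500
mock_head.assert_called()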
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Return the index of the weight vector closest to the sample
        (squared Euclidean distance)."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        return 0 if d0 < d1 else 1

    def update(
        self, weights: list[list[float]], sample: list[int], j: int, alpha: float
    ) -> list[list[float]]:
        """Pull the winning vector `j` towards the sample by learning rate `alpha`."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # training samples (m, n)
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization (n, C)
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
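# One step of the update rule `w += alpha * (x - w)`, worked numerically on the row of
# the initial weights that lies nearer the first training sample (here weights[1]).
alpha = 0.5
sample = [1, 1, 0, 0]
winner_row = [0.8, 0.4, 0.7, 0.3]

updated = [w + alpha * (x - w) for w, x in zip(winner_row, sample)]
print(updated)  # approximately [0.9, 0.7, 0.35, 0.15]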
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # in-order traversal of a BST appends values in sorted order
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # traverse BST in order
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
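# Note the `elif val > self.val` branch: equal values are neither sent left nor right,
# so duplicates collapse into a single node and tree_sort deduplicates its input.
print(tree_sort([3, 1, 3, 2, 3]))  # [1, 2, 3]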
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class _a ( unittest.TestCase ): '''simple docstring''' def __init__( self, A, A=7, A=3, A=18, A=30, A=400, A=True, A=None, A=True, A=None, A=True, A=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], A=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], A=True, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'height': 224, 'width': 224} SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {'height': 18, 'width': 18} SCREAMING_SNAKE_CASE : Optional[int] = parent SCREAMING_SNAKE_CASE : Tuple = batch_size SCREAMING_SNAKE_CASE : int = num_channels SCREAMING_SNAKE_CASE : Any = image_size SCREAMING_SNAKE_CASE : List[Any] = min_resolution SCREAMING_SNAKE_CASE : List[str] = max_resolution SCREAMING_SNAKE_CASE : Tuple = do_resize SCREAMING_SNAKE_CASE : Optional[int] = size SCREAMING_SNAKE_CASE : Optional[int] = do_center_crop SCREAMING_SNAKE_CASE : Optional[Any] = crop_size SCREAMING_SNAKE_CASE : Any = do_normalize SCREAMING_SNAKE_CASE : List[str] = image_mean SCREAMING_SNAKE_CASE : List[str] = image_std SCREAMING_SNAKE_CASE : Dict = do_convert_rgb def UpperCamelCase_ ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def UpperCamelCase_ ( self, A=False, A=False, A=False ): '''simple docstring''' assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: SCREAMING_SNAKE_CASE : Tuple = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uinta ) ) else: SCREAMING_SNAKE_CASE : Tuple = [] for i in range(self.batch_size ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = np.random.choice(np.arange(self.min_resolution, self.max_resolution ), 2 ) image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension SCREAMING_SNAKE_CASE : Optional[Any] = [Image.fromarray(np.moveaxis(A, 0, -1 ) ) for x in image_inputs] if torchify: SCREAMING_SNAKE_CASE : Union[str, Any] = [torch.from_numpy(A ) for x in image_inputs] return image_inputs @require_torch @require_vision class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Tuple = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = ChineseCLIPImageProcessingTester(self, do_center_crop=A ) @property def UpperCamelCase_ ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A, 'do_resize' ) ) self.assertTrue(hasattr(A, 'size' ) ) 
self.assertTrue(hasattr(A, 'do_center_crop' ) ) self.assertTrue(hasattr(A, 'center_crop' ) ) self.assertTrue(hasattr(A, 'do_normalize' ) ) self.assertTrue(hasattr(A, 'image_mean' ) ) self.assertTrue(hasattr(A, 'image_std' ) ) self.assertTrue(hasattr(A, 'do_convert_rgb' ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'height': 224, 'width': 224} ) self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18} ) SCREAMING_SNAKE_CASE : int = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 ) self.assertEqual(image_processor.size, {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84} ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE : int = self.image_processor_tester.prepare_inputs(equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A, Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) # Test batched SCREAMING_SNAKE_CASE : Tuple = image_processing(A, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.prepare_inputs(equal_resolution=A, numpify=A ) for image in image_inputs: self.assertIsInstance(A, np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) # Test batched SCREAMING_SNAKE_CASE : str = image_processing(A, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.prepare_inputs(equal_resolution=A, torchify=A ) for image in image_inputs: self.assertIsInstance(A, torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) # Test batched 
SCREAMING_SNAKE_CASE : List[str] = image_processing(A, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) @require_torch @require_vision class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : int = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = 3 @property def UpperCamelCase_ ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A, 'do_resize' ) ) self.assertTrue(hasattr(A, 'size' ) ) self.assertTrue(hasattr(A, 'do_center_crop' ) ) self.assertTrue(hasattr(A, 'center_crop' ) ) self.assertTrue(hasattr(A, 'do_normalize' ) ) self.assertTrue(hasattr(A, 'image_mean' ) ) self.assertTrue(hasattr(A, 'image_std' ) ) self.assertTrue(hasattr(A, 'do_convert_rgb' ) ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE : str = self.image_processor_tester.prepare_inputs(equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A, Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) # Test batched SCREAMING_SNAKE_CASE : Tuple = image_processing(A, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), )
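# The three `prepare_inputs` modes above produce PIL, NumPy, and PyTorch images of the
# same underlying pixels; a sketch of the conversions (the 3x32x32 shape is illustrative).
import numpy as np
import torch
from PIL import Image

array = np.random.randint(255, size=(3, 32, 32), dtype=np.uint8)  # channels-first
pil_image = Image.fromarray(np.moveaxis(array, 0, -1))            # PIL wants channels-last
tensor = torch.from_numpy(array)                                  # torch keeps channels-first

print(type(pil_image).__name__, array.shape, tuple(tensor.shape))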
'''simple docstring''' import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def lowercase__( *__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Optional[Union[Dict, Any]] = None ,__UpperCamelCase: Dict=True ,__UpperCamelCase: List[Any]=2 ): """simple docstring""" from .. import __version__ SCREAMING_SNAKE_CASE : int = take_from SCREAMING_SNAKE_CASE : Optional[int] = () if not isinstance(args[0] ,__UpperCamelCase ): SCREAMING_SNAKE_CASE : List[str] = (args,) for attribute, version_name, message in args: if version.parse(version.parse(__UpperCamelCase ).base_version ) >= version.parse(__UpperCamelCase ): raise ValueError( f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'" f" version {__version__} is >= {version_name}" ) SCREAMING_SNAKE_CASE : Tuple = None if isinstance(__UpperCamelCase ,__UpperCamelCase ) and attribute in deprecated_kwargs: values += (deprecated_kwargs.pop(__UpperCamelCase ),) SCREAMING_SNAKE_CASE : Dict = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}." elif hasattr(__UpperCamelCase ,__UpperCamelCase ): values += (getattr(__UpperCamelCase ,__UpperCamelCase ),) SCREAMING_SNAKE_CASE : Optional[int] = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}." elif deprecated_kwargs is None: SCREAMING_SNAKE_CASE : Dict = f"`{attribute}` is deprecated and will be removed in version {version_name}." if warning is not None: SCREAMING_SNAKE_CASE : Dict = warning + ' ' if standard_warn else '' warnings.warn(warning + message ,__UpperCamelCase ,stacklevel=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) and len(__UpperCamelCase ) > 0: SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.getouterframes(inspect.currentframe() )[1] SCREAMING_SNAKE_CASE : Any = call_frame.filename SCREAMING_SNAKE_CASE : Tuple = call_frame.lineno SCREAMING_SNAKE_CASE : Union[str, Any] = call_frame.function SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = next(iter(deprecated_kwargs.items() ) ) raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" ) if len(__UpperCamelCase ) == 0: return elif len(__UpperCamelCase ) == 1: return values[0] return values
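# A hedged, self-contained stand-in for the helper above, showing the calling pattern it
# supports: warn about a legacy keyword, pop it from the captured kwargs, and hand back
# the old value. The function and argument names here are illustrative, not the real API.
import warnings


def deprecate_kwarg(kwargs, name, removed_in, message):
    if name in kwargs:
        warnings.warn(
            f"`{name}` is deprecated and will be removed in version {removed_in}. {message}",
            FutureWarning,
            stacklevel=2,
        )
        return kwargs.pop(name)
    return None


def resize(**kwargs):
    size = deprecate_kwarg(kwargs, "scale", "1.0.0", "Use `size` instead.") or kwargs.get("size")
    return size


print(resize(scale=256))  # warns, then returns 256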
'''simple docstring''' from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class _a : '''simple docstring''' A : int A : Node | None = None A : Node | None = None def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = Node(1 ) SCREAMING_SNAKE_CASE : Dict = Node(2 ) SCREAMING_SNAKE_CASE : int = Node(3 ) SCREAMING_SNAKE_CASE : int = Node(4 ) SCREAMING_SNAKE_CASE : Dict = Node(5 ) return tree def lowercase__( __UpperCamelCase: Node | None ): """simple docstring""" return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def lowercase__( __UpperCamelCase: Node | None ): """simple docstring""" return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def lowercase__( __UpperCamelCase: Node | None ): """simple docstring""" return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def lowercase__( __UpperCamelCase: Node | None ): """simple docstring""" return (max(height(root.left ) ,height(root.right ) ) + 1) if root else 0 def lowercase__( __UpperCamelCase: Node | None ): """simple docstring""" SCREAMING_SNAKE_CASE : list[Any] = [] if root is None: return output SCREAMING_SNAKE_CASE : Dict = deque([root] ) while process_queue: SCREAMING_SNAKE_CASE : Union[str, Any] = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def lowercase__( __UpperCamelCase: Node | None ,__UpperCamelCase: int ): """simple docstring""" SCREAMING_SNAKE_CASE : list[Any] = [] def populate_output(__UpperCamelCase: Node | None ,__UpperCamelCase: int ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left ,level - 1 ) populate_output(root.right ,level - 1 ) populate_output(__UpperCamelCase ,__UpperCamelCase ) return output def lowercase__( __UpperCamelCase: Node | None ,__UpperCamelCase: int ): """simple docstring""" SCREAMING_SNAKE_CASE : list[Any] = [] def populate_output(__UpperCamelCase: Node | None ,__UpperCamelCase: int ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right ,level - 1 ) populate_output(root.left ,level - 1 ) populate_output(__UpperCamelCase ,__UpperCamelCase ) return output def lowercase__( __UpperCamelCase: Node | None ): """simple docstring""" if root is None: return [] SCREAMING_SNAKE_CASE : list[Sequence[Node | None]] = [] SCREAMING_SNAKE_CASE : List[str] = 0 SCREAMING_SNAKE_CASE : str = height(__UpperCamelCase ) for h in range(1 ,height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(__UpperCamelCase ,__UpperCamelCase ) ) SCREAMING_SNAKE_CASE : Any = 1 else: output.append(get_nodes_from_right_to_left(__UpperCamelCase ,__UpperCamelCase ) ) SCREAMING_SNAKE_CASE : List[Any] = 0 return output def lowercase__( ): # Main function for testing. 
"""simple docstring""" SCREAMING_SNAKE_CASE : Any = make_tree() print(f"In-order Traversal: {inorder(__UpperCamelCase )}" ) print(f"Pre-order Traversal: {preorder(__UpperCamelCase )}" ) print(f"Post-order Traversal: {postorder(__UpperCamelCase )}" ,'\n' ) print(f"Height of Tree: {height(__UpperCamelCase )}" ,'\n' ) print('Complete Level Order Traversal: ' ) print(level_order(__UpperCamelCase ) ,'\n' ) print('Level-wise order Traversal: ' ) for level in range(1 ,height(__UpperCamelCase ) + 1 ): print(f"Level {level}:" ,get_nodes_from_left_to_right(__UpperCamelCase ,level=__UpperCamelCase ) ) print('\nZigZag order Traversal: ' ) print(zigzag(__UpperCamelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
28
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ = { "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"], "tokenization_roformer": ["RoFormerTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["RoFormerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "RoFormerForCausalLM", "RoFormerForMaskedLM", "RoFormerForMultipleChoice", "RoFormerForQuestionAnswering", "RoFormerForSequenceClassification", "RoFormerForTokenClassification", "RoFormerLayer", "RoFormerModel", "RoFormerPreTrainedModel", "load_tf_weights_in_roformer", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRoFormerForCausalLM", "TFRoFormerForMaskedLM", "TFRoFormerForMultipleChoice", "TFRoFormerForQuestionAnswering", "TFRoFormerForSequenceClassification", "TFRoFormerForTokenClassification", "TFRoFormerLayer", "TFRoFormerModel", "TFRoFormerPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxRoFormerForMaskedLM", "FlaxRoFormerForMultipleChoice", "FlaxRoFormerForQuestionAnswering", "FlaxRoFormerForSequenceClassification", "FlaxRoFormerForTokenClassification", "FlaxRoFormerModel", "FlaxRoFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, 
FlaxRoFormerPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
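# `_LazyModule` defers the heavy framework imports above until a symbol is first touched.
# The core idea in a few lines, using module-level `__getattr__` (PEP 562); the package
# layout below is illustrative.
# mypkg/__init__.py
import importlib

_import_structure = {"modeling_roformer": ["RoFormerModel"]}


def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            # import the submodule only now, on first attribute access
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")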
import inspect
import unittest


class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # map module-style backend names onto their PyPI package names
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
def get_highest_set_bit_position(number: int) -> int:
    """
    Return the 1-indexed position of the highest set bit of `number`.
    >>> get_highest_set_bit_position(25)
    5
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
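# Quick cross-check: the shift-and-count loop agrees with the builtin `int.bit_length()`.
for number in (1, 2, 17, 1024):
    assert get_highest_set_bit_position(number) == number.bit_length()
print(get_highest_set_bit_position(17))  # 17 = 0b10001 -> 5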
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro-Winkler similarity of two strings, in [0, 1].
    >>> jaro_winkler("hello", "hello")
    1.0
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, char in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if char in _str2[left:right]:
                matched.append(char)
                # blank out the matched character so it cannot match twice
                _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
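# Worked example for jaro_winkler("hello", "world"): the matching window is
# min(5, 5) // 2 = 2, only the 'l' pair matches within it in both directions, so
# match_count = 1, transpositions = 0, and there is no common prefix.
match_count, transpositions = 1, 0
jaro = 1 / 3 * (match_count / 5 + match_count / 5 + (match_count - transpositions) / match_count)
print(jaro)  # 0.466... -- with no prefix bonus this is also the final score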
'''simple docstring''' from typing import Dict from .base import GenericTensor, Pipeline class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def UpperCamelCase_ ( self, A=None, A=None, A=None, **A ): '''simple docstring''' if tokenize_kwargs is None: SCREAMING_SNAKE_CASE : Optional[int] = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( 'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' ) SCREAMING_SNAKE_CASE : Tuple = truncation SCREAMING_SNAKE_CASE : int = tokenize_kwargs SCREAMING_SNAKE_CASE : Optional[Any] = {} if return_tensors is not None: SCREAMING_SNAKE_CASE : Optional[int] = return_tensors return preprocess_params, {}, postprocess_params def UpperCamelCase_ ( self, A, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.framework SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(A, return_tensors=A, **A ) return model_inputs def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.model(**A ) return model_outputs def UpperCamelCase_ ( self, A, A=False ): '''simple docstring''' if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self, *A, **A ): '''simple docstring''' return super().__call__(*A, **A )
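# In practice this pipeline is reached through the `pipeline` factory; a typical
# invocation (the model name is one common choice, not the only one).
from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("Transformers is great!", return_tensors=True)
print(features.shape)  # (batch, sequence_length, hidden_size), e.g. torch.Size([1, 7, 768])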
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "OPT_PRETRAINED_MODEL_ARCHIVE_LIST", "OPTForCausalLM", "OPTModel", "OPTPreTrainedModel", "OPTForSequenceClassification", "OPTForQuestionAnswering", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "FlaxOPTForCausalLM", "FlaxOPTModel", "FlaxOPTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' from __future__ import annotations import queue class _a : '''simple docstring''' def __init__( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = data SCREAMING_SNAKE_CASE : Optional[Any] = None SCREAMING_SNAKE_CASE : List[str] = None def lowercase__( ): """simple docstring""" print('\n********Press N to stop entering at any point of time********\n' ) SCREAMING_SNAKE_CASE : str = input('Enter the value of the root node: ' ).strip().lower() SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue() SCREAMING_SNAKE_CASE : Dict = TreeNode(int(__UpperCamelCase ) ) q.put(__UpperCamelCase ) while not q.empty(): SCREAMING_SNAKE_CASE : List[Any] = q.get() SCREAMING_SNAKE_CASE : Optional[int] = f"Enter the left node of {node_found.data}: " SCREAMING_SNAKE_CASE : Any = input(__UpperCamelCase ).strip().lower() or 'n' if check == "n": return tree_node SCREAMING_SNAKE_CASE : str = TreeNode(int(__UpperCamelCase ) ) SCREAMING_SNAKE_CASE : Any = left_node q.put(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = f"Enter the right node of {node_found.data}: " SCREAMING_SNAKE_CASE : Dict = input(__UpperCamelCase ).strip().lower() or 'n' if check == "n": return tree_node SCREAMING_SNAKE_CASE : Optional[int] = TreeNode(int(__UpperCamelCase ) ) SCREAMING_SNAKE_CASE : Any = right_node q.put(__UpperCamelCase ) raise def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return print(node.data ,end=',' ) pre_order(node.left ) pre_order(node.right ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return in_order(node.left ) print(node.data ,end=',' ) in_order(node.right ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data ,end=',' ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue() q.put(__UpperCamelCase ) while not q.empty(): SCREAMING_SNAKE_CASE : Optional[int] = q.get() print(node_dequeued.data ,end=',' ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue() q.put(__UpperCamelCase ) while not q.empty(): SCREAMING_SNAKE_CASE : Union[str, Any] = [] while not q.empty(): SCREAMING_SNAKE_CASE : List[Any] = q.get() print(node_dequeued.data ,end=',' ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__UpperCamelCase ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : list[TreeNode] = [] SCREAMING_SNAKE_CASE : Optional[Any] = node while n or stack: while n: # start from root node, find its left child print(n.data ,end=',' ) stack.append(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Any = n.left # end of while means current node doesn't have left child SCREAMING_SNAKE_CASE : List[Any] = stack.pop() # start to traverse its right child SCREAMING_SNAKE_CASE : 
Any = n.right def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : list[TreeNode] = [] SCREAMING_SNAKE_CASE : int = node while n or stack: while n: stack.append(__UpperCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = n.left SCREAMING_SNAKE_CASE : Tuple = stack.pop() print(n.data ,end=',' ) SCREAMING_SNAKE_CASE : str = n.right def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = [], [] SCREAMING_SNAKE_CASE : Optional[int] = node stacka.append(__UpperCamelCase ) while stacka: # to find the reversed order of post order, store it in stack2 SCREAMING_SNAKE_CASE : Optional[int] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__UpperCamelCase ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data ,end=',' ) def lowercase__( __UpperCamelCase: str = "" ,__UpperCamelCase: Dict=50 ,__UpperCamelCase: Optional[int]="*" ): """simple docstring""" if not s: return "\n" + width * char SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = divmod(width - len(__UpperCamelCase ) - 2 ,2 ) return f"{left * char} {s} {(left + extra) * char}" if __name__ == "__main__": import doctest doctest.testmod() print(prompt("Binary Tree Traversals")) UpperCamelCase_ = build_tree() print(prompt("Pre Order Traversal")) pre_order(node) print(prompt() + "\n") print(prompt("In Order Traversal")) in_order(node) print(prompt() + "\n") print(prompt("Post Order Traversal")) post_order(node) print(prompt() + "\n") print(prompt("Level Order Traversal")) level_order(node) print(prompt() + "\n") print(prompt("Actual Level Order Traversal")) level_order_actual(node) print("*" * 5_0 + "\n") print(prompt("Pre Order Traversal - Iteration Version")) pre_order_iter(node) print(prompt() + "\n") print(prompt("In Order Traversal - Iteration Version")) in_order_iter(node) print(prompt() + "\n") print(prompt("Post Order Traversal - Iteration Version")) post_order_iter(node) print(prompt())
28
1
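A minimal non-interactive sanity check for the traversal helpers above, building the three-node tree 2/(1,3) by hand instead of going through the interactive `build_tree` (a sketch; names assume the corrected file as fixed here):

# Hypothetical smoke test; expected output commented per call.
root = TreeNode(2)
root.left, root.right = TreeNode(1), TreeNode(3)
pre_order(root)   # 2,1,3,
print()
in_order(root)    # 1,2,3,
print()
post_order(root)  # 1,3,2,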
'''simple docstring''' import argparse import math import traceback import dateutil.parser as date_parser import requests def lowercase__( __UpperCamelCase: Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = {} SCREAMING_SNAKE_CASE : Optional[int] = job['started_at'] SCREAMING_SNAKE_CASE : List[str] = job['completed_at'] SCREAMING_SNAKE_CASE : List[Any] = date_parser.parse(__UpperCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = date_parser.parse(__UpperCamelCase ) SCREAMING_SNAKE_CASE : str = round((end_datetime - start_datetime).total_seconds() / 6_0.0 ) SCREAMING_SNAKE_CASE : Any = start SCREAMING_SNAKE_CASE : List[Any] = end SCREAMING_SNAKE_CASE : Any = duration_in_min return job_info def lowercase__( __UpperCamelCase: Optional[int] ,__UpperCamelCase: Optional[int]=None ): """simple docstring""" SCREAMING_SNAKE_CASE : int = None if token is not None: SCREAMING_SNAKE_CASE : Any = {'Accept': 'application/vnd.github+json', 'Authorization': f"Bearer {token}"} SCREAMING_SNAKE_CASE : Any = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100" SCREAMING_SNAKE_CASE : Optional[int] = requests.get(__UpperCamelCase ,headers=__UpperCamelCase ).json() SCREAMING_SNAKE_CASE : Dict = {} try: job_time.update({job['name']: extract_time_from_single_job(__UpperCamelCase ) for job in result['jobs']} ) SCREAMING_SNAKE_CASE : Dict = math.ceil((result['total_count'] - 1_00) / 1_00 ) for i in range(__UpperCamelCase ): SCREAMING_SNAKE_CASE : Any = requests.get(url + f"&page={i + 2}" ,headers=__UpperCamelCase ).json() job_time.update({job['name']: extract_time_from_single_job(__UpperCamelCase ) for job in result['jobs']} ) return job_time except Exception: print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" ) return {} if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") UpperCamelCase_ = parser.parse_args() UpperCamelCase_ = get_job_time(args.workflow_run_id) UpperCamelCase_ = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(F"""{k}: {v['duration']}""")
28
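For reference, a minimal sketch of driving the script above programmatically; the run id is a placeholder, and unauthenticated requests are rate-limited by GitHub:

# Hypothetical run id; normally passed on the CLI as --workflow_run_id.
job_time = get_job_time("1234567890")
for name, info in sorted(job_time.items(), key=lambda kv: kv[1]["duration"], reverse=True):
    print(f"{name}: {info['duration']} min")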
'''simple docstring''' import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class _a : '''simple docstring''' def __init__( self, A = "cpu", A = "openai/clip-vit-large-patch14" ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = device SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizerFast.from_pretrained(A ) SCREAMING_SNAKE_CASE : int = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] SCREAMING_SNAKE_CASE : str = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] SCREAMING_SNAKE_CASE : Dict = torchvision.transforms.Normalize(self.image_mean, self.image_std ) SCREAMING_SNAKE_CASE : List[str] = torchvision.transforms.Resize(224 ) SCREAMING_SNAKE_CASE : List[Any] = torchvision.transforms.CenterCrop(224 ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.resize(A ) SCREAMING_SNAKE_CASE : Any = self.center_crop(A ) SCREAMING_SNAKE_CASE : str = self.normalize(A ) return images def __call__( self, A=None, A=None, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.tokenizer(text=A, **A ) SCREAMING_SNAKE_CASE : Tuple = self.preprocess_img(A ) SCREAMING_SNAKE_CASE : List[str] = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class _a ( nn.Module ): '''simple docstring''' def __init__( self, A=10, A=0.01, A=None, A=None, A=None, A=None, A=None, A=None, A=False, A=True, A="image", A=True, A=False, A=False, A=False, ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : List[Any] = device if device else get_device() if vqgan: SCREAMING_SNAKE_CASE : Optional[Any] = vqgan else: SCREAMING_SNAKE_CASE : Tuple = load_vqgan(self.device, conf_path=A, ckpt_path=A ) self.vqgan.eval() if clip: SCREAMING_SNAKE_CASE : List[str] = clip else: SCREAMING_SNAKE_CASE : Any = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' ) self.clip.to(self.device ) SCREAMING_SNAKE_CASE : Optional[int] = ProcessorGradientFlow(device=self.device ) SCREAMING_SNAKE_CASE : Optional[int] = iterations SCREAMING_SNAKE_CASE : Tuple = lr SCREAMING_SNAKE_CASE : Tuple = log SCREAMING_SNAKE_CASE : str = make_grid SCREAMING_SNAKE_CASE : Dict = return_val SCREAMING_SNAKE_CASE : Union[str, Any] = quantize SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decoder.z_shape def UpperCamelCase_ ( self, A=None, A=None, A=5, A=True ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [] if output_path is None: SCREAMING_SNAKE_CASE : int = './animation.gif' if input_path is None: SCREAMING_SNAKE_CASE : Optional[int] = self.save_path SCREAMING_SNAKE_CASE : Optional[Any] = sorted(glob(input_path + '/*' ) ) if not len(A ): raise ValueError( 'No images found in save path, aborting (did you pass save_intermediate=True to the generate' ' function?)' ) if len(A ) == 1: print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' ) SCREAMING_SNAKE_CASE : Optional[Any] = total_duration / len(A ) SCREAMING_SNAKE_CASE : int = [frame_duration] * len(A ) if extend_frames: SCREAMING_SNAKE_CASE : List[str] = 1.5 SCREAMING_SNAKE_CASE : int = 3 for file_name in paths: if file_name.endswith('.png' ): images.append(imageio.imread(A ) ) imageio.mimsave(A, A, duration=A ) print(F"gif 
saved to {output_path}" ) def UpperCamelCase_ ( self, A=None, A=None ): '''simple docstring''' if not (path or img): raise ValueError('Input either path or tensor' ) if img is not None: raise NotImplementedError SCREAMING_SNAKE_CASE : str = preprocess(Image.open(A ), target_image_size=256 ).to(self.device ) SCREAMING_SNAKE_CASE : Any = preprocess_vqgan(A ) SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : Tuple = self.vqgan.encode(A ) return z def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.latent.detach().requires_grad_() SCREAMING_SNAKE_CASE : Union[str, Any] = base_latent + transform_vector if self.quantize: SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.quantize(A ) else: SCREAMING_SNAKE_CASE : Optional[Any] = trans_latent return self.vqgan.decode(A ) def UpperCamelCase_ ( self, A, A, A=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.clip_preprocessor(text=A, images=A, return_tensors='pt', padding=A ) SCREAMING_SNAKE_CASE : str = self.clip(**A ) SCREAMING_SNAKE_CASE : Any = clip_outputs.logits_per_image if weights is not None: SCREAMING_SNAKE_CASE : List[Any] = similarity_logits * weights return similarity_logits.sum() def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_clip_similarity(pos_prompts['prompts'], A, weights=(1 / pos_prompts['weights']) ) if neg_prompts: SCREAMING_SNAKE_CASE : List[Any] = self._get_clip_similarity(neg_prompts['prompts'], A, weights=neg_prompts['weights'] ) else: SCREAMING_SNAKE_CASE : str = torch.tensor([1], device=self.device ) SCREAMING_SNAKE_CASE : List[Any] = -torch.log(A ) + torch.log(A ) return loss def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = torch.randn_like(self.latent, requires_grad=A, device=self.device ) SCREAMING_SNAKE_CASE : Optional[int] = torch.optim.Adam([vector], lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() SCREAMING_SNAKE_CASE : Union[str, Any] = self._add_vector(A ) SCREAMING_SNAKE_CASE : Dict = loop_post_process(A ) SCREAMING_SNAKE_CASE : List[str] = self._get_CLIP_loss(A, A, A ) print('CLIP loss', A ) if self.log: wandb.log({'CLIP Loss': clip_loss} ) clip_loss.backward(retain_graph=A ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' wandb.init(reinit=A, project='face-editor' ) wandb.config.update({'Positive Prompts': positive_prompts} ) wandb.config.update({'Negative Prompts': negative_prompts} ) wandb.config.update({'lr': self.lr, 'iterations': self.iterations} ) if image_path: SCREAMING_SNAKE_CASE : Tuple = Image.open(A ) SCREAMING_SNAKE_CASE : int = image.resize((256, 256) ) wandb.log('Original Image', wandb.Image(A ) ) def UpperCamelCase_ ( self, A ): '''simple docstring''' if not prompts: return [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Dict = [] if isinstance(A, A ): SCREAMING_SNAKE_CASE : Union[str, Any] = [prompt.strip() for prompt in prompts.split('|' )] for prompt in prompts: if isinstance(A, (tuple, list) ): SCREAMING_SNAKE_CASE : List[str] = prompt[0] SCREAMING_SNAKE_CASE : Any = float(prompt[1] ) elif ":" in prompt: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = prompt.split(':' ) SCREAMING_SNAKE_CASE : Any = float(A ) else: SCREAMING_SNAKE_CASE : Dict = prompt SCREAMING_SNAKE_CASE : List[Any] = 1.0 processed_prompts.append(A ) weights.append(A ) return { 
"prompts": processed_prompts, "weights": torch.tensor(A, device=self.device ), } def UpperCamelCase_ ( self, A, A=None, A=None, A=True, A=False, A=True, A=True, A=None, ): '''simple docstring''' if image_path: SCREAMING_SNAKE_CASE : int = self._get_latent(A ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(self.latent_dim, device=self.device ) if self.log: self._init_logging(A, A, A ) assert pos_prompts, "You must provide at least one positive prompt." SCREAMING_SNAKE_CASE : Dict = self.process_prompts(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.process_prompts(A ) if save_final and save_path is None: SCREAMING_SNAKE_CASE : Optional[int] = os.path.join('./outputs/', '_'.join(pos_prompts['prompts'] ) ) if not os.path.exists(A ): os.makedirs(A ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = save_path + '_' + get_timestamp() os.makedirs(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = save_path SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decode(self.latent )[0] if show_intermediate: print('Original Image' ) show_pil(custom_to_pil(A ) ) SCREAMING_SNAKE_CASE : int = loop_post_process(A ) for iter, transformed_img in enumerate(self._optimize_CLIP(A, A, A ) ): if show_intermediate: show_pil(A ) if save_intermediate: transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}.png" ) ) if self.log: wandb.log({'Image': wandb.Image(A )} ) if show_final: show_pil(A ) if save_final: transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}_final.png" ) )
28
1
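A rough usage sketch for the editor class above (anonymized as `_a` in this dump; upstream it is `VQGAN_CLIP`). The keyword names and prompt strings are assumptions inferred from the attribute accesses visible in the code:

# Hypothetical usage; prompts are "text:weight" pairs split on "|", as parsed
# by the prompt-processing method above.
editor = VQGAN_CLIP(iterations=20, lr=0.05)
editor.generate(
    pos_prompts="a smiling face:1.0|blue eyes:0.5",
    neg_prompts="sunglasses",
    image_path="./input/face.png",
)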
'''simple docstring''' import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def lowercase__( *__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Optional[Union[Dict, Any]] = None ,__UpperCamelCase: Dict=True ,__UpperCamelCase: List[Any]=2 ): """simple docstring""" from .. import __version__ SCREAMING_SNAKE_CASE : int = take_from SCREAMING_SNAKE_CASE : Optional[int] = () if not isinstance(args[0] ,__UpperCamelCase ): SCREAMING_SNAKE_CASE : List[str] = (args,) for attribute, version_name, message in args: if version.parse(version.parse(__UpperCamelCase ).base_version ) >= version.parse(__UpperCamelCase ): raise ValueError( f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'" f" version {__version__} is >= {version_name}" ) SCREAMING_SNAKE_CASE : Tuple = None if isinstance(__UpperCamelCase ,__UpperCamelCase ) and attribute in deprecated_kwargs: values += (deprecated_kwargs.pop(__UpperCamelCase ),) SCREAMING_SNAKE_CASE : Dict = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}." elif hasattr(__UpperCamelCase ,__UpperCamelCase ): values += (getattr(__UpperCamelCase ,__UpperCamelCase ),) SCREAMING_SNAKE_CASE : Optional[int] = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}." elif deprecated_kwargs is None: SCREAMING_SNAKE_CASE : Dict = f"`{attribute}` is deprecated and will be removed in version {version_name}." if warning is not None: SCREAMING_SNAKE_CASE : Dict = warning + ' ' if standard_warn else '' warnings.warn(warning + message ,__UpperCamelCase ,stacklevel=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) and len(__UpperCamelCase ) > 0: SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.getouterframes(inspect.currentframe() )[1] SCREAMING_SNAKE_CASE : Any = call_frame.filename SCREAMING_SNAKE_CASE : Tuple = call_frame.lineno SCREAMING_SNAKE_CASE : Union[str, Any] = call_frame.function SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = next(iter(deprecated_kwargs.items() ) ) raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" ) if len(__UpperCamelCase ) == 0: return elif len(__UpperCamelCase ) == 1: return values[0] return values
28
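A sketch of how the helper above is called inside diffusers, where it is exported as `deprecate` (the anonymized name in this dump is an artifact); the attribute name and version below are illustrative:

# Hypothetical call site: accept a deprecated `scale` kwarg while warning about it.
kwargs = {"scale": 0.5}
scale = deprecate("scale", "99.0.0", "Pass `scale` via cross_attention_kwargs instead.", take_from=kwargs)
# emits a FutureWarning, pops "scale" from kwargs, and returns 0.5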
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging

logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample,
        timestep,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale,
        class_labels=None,
        timestep_cond=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        guess_mode=False,
        return_dict=True,
    ):
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory,
        is_main_process=True,
        save_function=None,
        safe_serialization=False,
        variant=None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with
        # `DiffusionPipeline.from_pretrained`; second, third, ... controlnets have to be saved under
        # `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. "
                f"Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
28
1
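A sketch of composing two ControlNets with the wrapper above (upstream the class is `MultiControlNetModel`; the checkpoint ids are placeholders):

# Hypothetical composition; forward() then expects one conditioning image and
# one scale per net, and sums the per-net residuals before the UNet consumes them.
controlnet_pose = ControlNetModel.from_pretrained("some-org/controlnet-pose")
controlnet_canny = ControlNetModel.from_pretrained("some-org/controlnet-canny")
multi_controlnet = MultiControlNetModel([controlnet_pose, controlnet_canny])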
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class TrainingArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and model updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10_000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate scheduler type."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50_000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1_024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1_024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})


@dataclass
class EvaluationArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1_024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class HumanEvalArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "File where the evaluation results are saved."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )


@dataclass
class PreprocessingArguments:
    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save the processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100_000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1_000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot",
        metadata={"help": "Name or path to the tokenizer."},
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )


@dataclass
class TokenizerTrainingArguments:
    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    n_examples: Optional[int] = field(
        default=200_000, metadata={"help": "Number of examples to train tokenizer on."}
    )
    vocab_size: Optional[int] = field(default=32_768, metadata={"help": "Vocabulary size of the new tokenizer."})
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class PretokenizationArguments:
    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for pretokenization."})


@dataclass
class InitializationArguments:
    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved model to the hub."})
28
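These dataclasses are meant to be consumed with `HfArgumentParser`; a minimal sketch, assuming the restored class names above:

# Hypothetical driver: expose TrainingArguments as a CLI.
from transformers import HfArgumentParser

parser = HfArgumentParser(TrainingArguments)
args = parser.parse_args_into_dataclasses()[0]  # e.g. --train_batch_size 4
print(args.model_ckpt, args.seq_length)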
'''simple docstring''' from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging UpperCamelCase_ = logging.get_logger(__name__) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : str = ['''audio_values''', '''audio_mask'''] def __init__( self, A=2_048, A=1, A=[16, 16], A=128, A=44_100, A=86, A=2_048, A=0.0, **A, ): '''simple docstring''' super().__init__( feature_size=A, sampling_rate=A, padding_value=A, **A, ) SCREAMING_SNAKE_CASE : str = spectrogram_length SCREAMING_SNAKE_CASE : Optional[Any] = num_channels SCREAMING_SNAKE_CASE : List[str] = patch_size SCREAMING_SNAKE_CASE : Optional[int] = feature_size // self.patch_size[1] SCREAMING_SNAKE_CASE : Dict = n_fft SCREAMING_SNAKE_CASE : Tuple = sampling_rate // hop_length_to_sampling_rate SCREAMING_SNAKE_CASE : str = sampling_rate SCREAMING_SNAKE_CASE : int = padding_value SCREAMING_SNAKE_CASE : Any = mel_filter_bank( num_frequency_bins=1 + n_fft // 2, num_mel_filters=A, min_frequency=0.0, max_frequency=2_20_50.0, sampling_rate=A, norm='slaney', mel_scale='slaney', ).T def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = spectrogram( A, window_function(self.n_fft, 'hann' ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0, ) SCREAMING_SNAKE_CASE : Union[str, Any] = log_spec[:, :-1] SCREAMING_SNAKE_CASE : List[Any] = log_spec - 20.0 SCREAMING_SNAKE_CASE : Optional[Any] = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0 return log_spec def __call__( self, A, A = None, A = True, A = None, A = False, A = False, **A, ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( 'This feature extractor is set to support sampling rate' F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled" F" with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) SCREAMING_SNAKE_CASE : List[Any] = isinstance(A, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"Only mono-channel audio is supported for input to {self}" ) SCREAMING_SNAKE_CASE : int = is_batched_numpy or ( isinstance(A, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(A, np.ndarray ): SCREAMING_SNAKE_CASE : Any = np.asarray(A, dtype=np.floataa ) elif isinstance(A, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): SCREAMING_SNAKE_CASE : Optional[Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis SCREAMING_SNAKE_CASE : int = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0], A ): SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(A, dtype=np.floataa ) for feature in audio_features] # Create audio attention mask SCREAMING_SNAKE_CASE : Tuple = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: SCREAMING_SNAKE_CASE : List[Any] = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] SCREAMING_SNAKE_CASE : Tuple = np.array(A ).astype(np.floataa ) # convert into correct format for padding SCREAMING_SNAKE_CASE : Tuple = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch SCREAMING_SNAKE_CASE : Optional[Any] = np.ones([len(A ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) SCREAMING_SNAKE_CASE : Optional[int] = padded_audio_features * self.padding_value for i in range(len(A ) ): SCREAMING_SNAKE_CASE : Optional[int] = audio_features[i] SCREAMING_SNAKE_CASE : Union[str, Any] = feature # return as BatchFeature if return_attention_mask: SCREAMING_SNAKE_CASE : Any = {'audio_values': padded_audio_features, 'audio_mask': audio_mask} else: SCREAMING_SNAKE_CASE : Dict = {'audio_values': padded_audio_features} SCREAMING_SNAKE_CASE : str = BatchFeature(data=A, tensor_type=A ) return encoded_inputs
28
1
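A smoke test for the feature extractor above (anonymized as `_a`; upstream it is `TvltFeatureExtractor`). The keyword names follow the parameters actually read inside `__call__`, but treat them as assumptions:

# Hypothetical call on one second of fake mono audio at the default 44.1 kHz.
import numpy as np

extractor = TvltFeatureExtractor()
audio = [np.random.randn(44_100).astype(np.float32)]
features = extractor(audio, sampling_rate=44_100, return_attention_mask=True)
print(features["audio_values"].shape, features["audio_mask"].shape)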
'''simple docstring''' import numpy as np def lowercase__( __UpperCamelCase: np.array ): """simple docstring""" return 1 / (1 + np.exp(-vector )) if __name__ == "__main__": import doctest doctest.testmod()
28
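A quick sanity check for the function above (named `sigmoid` upstream; anonymized in this dump):

# sigmoid(0) == 0.5, and values saturate toward 0 and 1.
import numpy as np

print(sigmoid(np.array([-1.0, 0.0, 2.0])))
# -> [0.26894142 0.5        0.88079708]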
'''simple docstring''' from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = 9, 14 # noqa: F841 SCREAMING_SNAKE_CASE : Optional[Any] = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] SCREAMING_SNAKE_CASE : Optional[int] = defaultdict(__UpperCamelCase ) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost] ) adjancency[nodea].append([nodea, cost] ) SCREAMING_SNAKE_CASE : Dict = mst(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: SCREAMING_SNAKE_CASE : Any = tuple(answer[:2] ) SCREAMING_SNAKE_CASE : List[Any] = tuple(edge[::-1] ) assert edge in result or reverse in result
28
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ = { "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"], "tokenization_roformer": ["RoFormerTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["RoFormerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "RoFormerForCausalLM", "RoFormerForMaskedLM", "RoFormerForMultipleChoice", "RoFormerForQuestionAnswering", "RoFormerForSequenceClassification", "RoFormerForTokenClassification", "RoFormerLayer", "RoFormerModel", "RoFormerPreTrainedModel", "load_tf_weights_in_roformer", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRoFormerForCausalLM", "TFRoFormerForMaskedLM", "TFRoFormerForMultipleChoice", "TFRoFormerForQuestionAnswering", "TFRoFormerForSequenceClassification", "TFRoFormerForTokenClassification", "TFRoFormerLayer", "TFRoFormerModel", "TFRoFormerPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxRoFormerForMaskedLM", "FlaxRoFormerForMultipleChoice", "FlaxRoFormerForQuestionAnswering", "FlaxRoFormerForSequenceClassification", "FlaxRoFormerForTokenClassification", "FlaxRoFormerModel", "FlaxRoFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, 
FlaxRoFormerPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
28
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : int = StableDiffusionDiffEditPipeline A : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''} A : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''} A : str = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess A : Union[str, Any] = frozenset([] ) def UpperCamelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=A, ) SCREAMING_SNAKE_CASE : int = DDIMScheduler( beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_one=A, ) SCREAMING_SNAKE_CASE : str = DDIMInverseScheduler( beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_zero=A, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Tuple = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, ) SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(A ) SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) SCREAMING_SNAKE_CASE : int = { 'unet': unet, 'scheduler': scheduler, 'inverse_scheduler': inverse_scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 16, 16), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(A ) ).to(A ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = { 'prompt': 'a dog and a newt', 'mask_image': mask, 'image_latents': latents, 'generator': generator, 
'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : Any = image.cpu().permute(0, 2, 3, 1 )[0] SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(A ) ).convert('RGB' ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : int = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Dict = { 'image': image, 'source_prompt': 'a cat and a frog', 'target_prompt': 'a dog and a newt', 'generator': generator, 'num_inference_steps': 2, 'num_maps_per_mask': 2, 'mask_encode_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0, 2, 3, 1 )[0] SCREAMING_SNAKE_CASE : int = Image.fromarray(np.uinta(A ) ).convert('RGB' ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Any = { 'image': image, 'prompt': 'a cat and a frog', 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'decode_latents': True, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' if not hasattr(self.pipeline_class, '_optional_components' ): return SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components() SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(A, A, A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Dict = pipe(**A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(A ) SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class.from_pretrained(A ) pipe_loaded.to(A ) pipe_loaded.set_progress_bar_config(disable=A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(A, A ) is None, F"`{optional_component}` did not stay set to None after loading.", ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Tuple = pipe_loaded(**A )[0] SCREAMING_SNAKE_CASE : List[str] = np.abs(output - output_loaded ).max() self.assertLess(A, 1E-4 ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = 'cpu' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : str = self.get_dummy_mask_inputs(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.generate_mask(**A ) SCREAMING_SNAKE_CASE : Dict = mask[0, -3:, -3:] self.assertEqual(mask.shape, (1, 16, 16) ) SCREAMING_SNAKE_CASE : Any = np.array([0] * 9 ) SCREAMING_SNAKE_CASE : Any = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) self.assertEqual(mask[0, -3, -4], 0 ) def UpperCamelCase_ ( self ): 
'''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 'cpu' SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components() SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A ) SCREAMING_SNAKE_CASE : Optional[Any] = pipe.invert(**A ).images SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3) ) SCREAMING_SNAKE_CASE : Tuple = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], ) SCREAMING_SNAKE_CASE : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=5E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 'cpu' SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Dict = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'} SCREAMING_SNAKE_CASE : Union[str, Any] = DPMSolverMultistepScheduler(**A ) SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepInverseScheduler(**A ) SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A ) SCREAMING_SNAKE_CASE : List[str] = pipe.invert(**A ).images SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3) ) SCREAMING_SNAKE_CASE : Tuple = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], ) SCREAMING_SNAKE_CASE : Any = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) @require_torch_gpu @slow class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' ) SCREAMING_SNAKE_CASE : Optional[int] = raw_image.convert('RGB' ).resize((768, 768) ) SCREAMING_SNAKE_CASE : List[str] = raw_image def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE : int = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : List[Any] = 'a bowl of fruit' SCREAMING_SNAKE_CASE : List[str] = 'a bowl of pears' SCREAMING_SNAKE_CASE : Dict = pipe.generate_mask( image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, ) SCREAMING_SNAKE_CASE : Optional[int] = pipe.invert( prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A ).latents SCREAMING_SNAKE_CASE : List[str] = pipe( prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, output_type='numpy', ).images[0] SCREAMING_SNAKE_CASE : List[Any] = ( np.array( load_image( 
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1 def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : int = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : str = 'a bowl of fruit' SCREAMING_SNAKE_CASE : Tuple = 'a bowl of pears' SCREAMING_SNAKE_CASE : List[Any] = pipe.generate_mask( image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, ) SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.invert( prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A, num_inference_steps=25, ).latents SCREAMING_SNAKE_CASE : str = pipe( prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy', ).images[0] SCREAMING_SNAKE_CASE : Tuple = ( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1
28
1
'''simple docstring''' from collections import deque def lowercase__( __UpperCamelCase: List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = len(__UpperCamelCase ) SCREAMING_SNAKE_CASE : str = deque() SCREAMING_SNAKE_CASE : int = [False for _ in range(__UpperCamelCase )] SCREAMING_SNAKE_CASE : List[str] = [-1 for _ in range(__UpperCamelCase )] SCREAMING_SNAKE_CASE : str = index_of[:] def strong_connect(__UpperCamelCase: List[Any] ,__UpperCamelCase: str ,__UpperCamelCase: Union[str, Any] ): SCREAMING_SNAKE_CASE : int = index # the number when this node is seen SCREAMING_SNAKE_CASE : str = index # lowest rank node reachable from here index += 1 stack.append(__UpperCamelCase ) SCREAMING_SNAKE_CASE : str = True for w in g[v]: if index_of[w] == -1: SCREAMING_SNAKE_CASE : Tuple = strong_connect(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) SCREAMING_SNAKE_CASE : int = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) elif on_stack[w]: SCREAMING_SNAKE_CASE : str = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) if lowlink_of[v] == index_of[v]: SCREAMING_SNAKE_CASE : Dict = [] SCREAMING_SNAKE_CASE : Dict = stack.pop() SCREAMING_SNAKE_CASE : Optional[int] = False component.append(__UpperCamelCase ) while w != v: SCREAMING_SNAKE_CASE : List[Any] = stack.pop() SCREAMING_SNAKE_CASE : int = False component.append(__UpperCamelCase ) components.append(__UpperCamelCase ) return index SCREAMING_SNAKE_CASE : int = [] for v in range(__UpperCamelCase ): if index_of[v] == -1: strong_connect(__UpperCamelCase ,0 ,__UpperCamelCase ) return components def lowercase__( __UpperCamelCase: List[str] ,__UpperCamelCase: str ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = [[] for _ in range(__UpperCamelCase )] for u, v in edges: g[u].append(__UpperCamelCase ) return g if __name__ == "__main__": # Test UpperCamelCase_ = 7 UpperCamelCase_ = [0, 0, 1, 2, 3, 3, 4, 4, 6] UpperCamelCase_ = [1, 3, 2, 0, 1, 4, 5, 6, 5] UpperCamelCase_ = [(u, v) for u, v in zip(source, target)] UpperCamelCase_ = create_graph(n_vertices, edges) assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
28
'''simple docstring''' def lowercase__( __UpperCamelCase: int = 1_00_00_00 ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = [i - 1 for i in range(limit + 1 )] for i in range(2 ,limit + 1 ): if phi[i] == i - 1: for j in range(2 * i ,limit + 1 ,__UpperCamelCase ): phi[j] -= phi[j] // i return sum(phi[2 : limit + 1] ) if __name__ == "__main__": print(solution())
28
1
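A sanity check for the totient sieve above (named `solution` upstream; anonymized in this dump): for denominators up to 8, phi(2)+...+phi(8) = 1+2+2+4+2+6+4 = 21.

# Hand-computed totients as a check.
assert solution(8) == 21
assert solution(10) == 31  # adds phi(9) = 6 and phi(10) = 4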
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase_ = { "configuration_trajectory_transformer": [ "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrajectoryTransformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TrajectoryTransformerModel", "TrajectoryTransformerPreTrainedModel", "load_tf_weights_in_trajectory_transformer", ] if TYPE_CHECKING: from .configuration_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, load_tf_weights_in_trajectory_transformer, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
28
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : str = LongformerTokenizer A : List[str] = True A : Optional[int] = LongformerTokenizerFast A : Tuple = True def UpperCamelCase_ ( self ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE : Any = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(A, range(len(A ) ) ) ) SCREAMING_SNAKE_CASE : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] SCREAMING_SNAKE_CASE : Tuple = {'unk_token': '<unk>'} SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] ) SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file, 'w', encoding='utf-8' ) as fp: fp.write(json.dumps(A ) + '\n' ) with open(self.merges_file, 'w', encoding='utf-8' ) as fp: fp.write('\n'.join(A ) ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = 'lower newer' SCREAMING_SNAKE_CASE : Union[str, Any] = 'lower newer' return input_text, output_text def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map ) SCREAMING_SNAKE_CASE : Optional[Any] = 'lower newer' SCREAMING_SNAKE_CASE : List[str] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(A ) # , add_prefix_space=True) self.assertListEqual(A, A ) SCREAMING_SNAKE_CASE : List[Any] = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ), A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=A ), [0, 31_414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode('Hello world! 
cécé herlolip 418', add_special_tokens=A ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('sequence builders', add_special_tokens=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('multi-sequence build', add_special_tokens=A ) SCREAMING_SNAKE_CASE : int = tokenizer.encode( 'sequence builders', add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode( 'sequence builders', 'multi-sequence build', add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A, A ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Optional[int] = 'Encode this sequence.' SCREAMING_SNAKE_CASE : List[str] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(A, A ) SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A ) SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(A, A ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(A, A ) # Testing spaces after special tokens SCREAMING_SNAKE_CASE : Optional[int] = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(A, lstrip=A, rstrip=A )} ) # mask token has a left space SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A ) SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask> sequence' SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask>sequence' SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(A ) SCREAMING_SNAKE_CASE : Tuple = encoded.index(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(A, A ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = encoded.index(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(A, A ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(A, **A ) SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(A, **A ) SCREAMING_SNAKE_CASE : Optional[Any] = 'A, <mask> AllenNLP sentence.' 
SCREAMING_SNAKE_CASE : Any = tokenizer_r.encode_plus(A, add_special_tokens=A, return_token_type_ids=A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(A, add_special_tokens=A, return_token_type_ids=A ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), ) SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def UpperCamelCase_ ( self ): '''simple docstring''' for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ): SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'], A ) self.assertEqual(post_processor_state['add_prefix_space'], A ) self.assertEqual(post_processor_state['trim_offsets'], A ) def UpperCamelCase_ ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): SCREAMING_SNAKE_CASE : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` SCREAMING_SNAKE_CASE : Tuple = F"{text_of_1_token} {text_of_1_token}" SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) 
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Any = F" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[str] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : str = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A, use_fast=A, add_prefix_space=A, trim_offsets=A ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
28
1
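The tokenizer-test row above asserts specific `offset_mapping` spans for a two-token string under different `add_prefix_space` / `trim_offsets` settings. Not part of the dataset row: a toy reproduction of the offset arithmetic those assertions encode, in plain Python rather than the tokenizers library, with names borrowed from the test.

```python
# Illustrates the spans asserted above for the string "hello hello".
text_of_1_token = "hello"
text = f"{text_of_1_token} {text_of_1_token}"
n = len(text_of_1_token)

# trim_offsets=True: the space between tokens is excluded from the second span.
trimmed = [(0, n), (n + 1, n + 1 + n)]
# trim_offsets=False: the space is attributed to the second token's span.
untrimmed = [(0, n), (n, n + 1 + n)]

assert text[slice(*trimmed[1])] == "hello"
assert text[slice(*untrimmed[1])] == " hello"
```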
'''simple docstring''' import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self, A ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Dict = nn.ModuleList(A ) def UpperCamelCase_ ( self, A, A, A, A, A, A = None, A = None, A = None, A = None, A = False, A = True, ): '''simple docstring''' for i, (image, scale, controlnet) in enumerate(zip(A, A, self.nets ) ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = controlnet( A, A, A, A, A, A, A, A, A, A, A, ) # merge samples if i == 0: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = down_samples, mid_sample else: SCREAMING_SNAKE_CASE : str = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(A, A ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def UpperCamelCase_ ( self, A, A = True, A = None, A = False, A = None, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 0 SCREAMING_SNAKE_CASE : Optional[int] = save_directory for controlnet in self.nets: controlnet.save_pretrained( A, is_main_process=A, save_function=A, safe_serialization=A, variant=A, ) idx += 1 SCREAMING_SNAKE_CASE : List[Any] = model_path_to_save + F"_{idx}" @classmethod def UpperCamelCase_ ( cls, A, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 0 SCREAMING_SNAKE_CASE : List[Any] = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... SCREAMING_SNAKE_CASE : Optional[Any] = pretrained_model_path while os.path.isdir(A ): SCREAMING_SNAKE_CASE : Optional[int] = ControlNetModel.from_pretrained(A, **A ) controlnets.append(A ) idx += 1 SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + F"_{idx}" logger.info(F"{len(A )} controlnets loaded from {pretrained_model_path}." ) if len(A ) == 0: raise ValueError( F"No ControlNets found under {os.path.dirname(A )}. Expected at least {pretrained_model_path + '_0'}." ) return cls(A )
28
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Union[str, Any] = StableDiffusionXLImgaImgPipeline A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} A : str = PipelineTesterMixin.required_optional_params - {'''latents'''} A : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS A : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS A : int = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCamelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), attention_head_dim=(2, 4), use_linear_projection=A, addition_embed_type='text_time', addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, ) SCREAMING_SNAKE_CASE : str = EulerDiscreteScheduler( beta_start=0.0_00_85, beta_end=0.0_12, steps_offset=1, beta_schedule='scaled_linear', timestep_spacing='leading', ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[str] = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=32, ) SCREAMING_SNAKE_CASE : int = CLIPTextModel(A ) SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A ) SCREAMING_SNAKE_CASE : Optional[int] = CLIPTextModelWithProjection(A ) SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A ) SCREAMING_SNAKE_CASE : List[str] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'text_encoder_2': text_encoder_a, 'tokenizer_2': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : str = image / 2 + 0.5 if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : List[Any] = { 'prompt': 'A painting of a squirrel eating a burger', 
'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 5.0, 'output_type': 'numpy', 'strength': 0.75, } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE : str = self.get_dummy_components() SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionXLImgaImgPipeline(**A ) SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Any = sd_pipe(**A ).images SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE : List[Any] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase_ ( self ): '''simple docstring''' super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : List[str] = StableDiffusionXLImgaImgPipeline(**A ) SCREAMING_SNAKE_CASE : str = sd_pipe.to(A ) SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) # forward without prompt embeds SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Optional[Any] = 3 * ['this is a negative prompt'] SCREAMING_SNAKE_CASE : Optional[int] = negative_prompt SCREAMING_SNAKE_CASE : Optional[int] = 3 * [inputs['prompt']] SCREAMING_SNAKE_CASE : int = sd_pipe(**A ) SCREAMING_SNAKE_CASE : List[Any] = output.images[0, -3:, -3:, -1] # forward with prompt embeds SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : str = 3 * ['this is a negative prompt'] SCREAMING_SNAKE_CASE : int = 3 * [inputs.pop('prompt' )] ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) : Optional[Any] = sd_pipe.encode_prompt(A, negative_prompt=A ) SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe( **A, prompt_embeds=A, negative_prompt_embeds=A, pooled_prompt_embeds=A, negative_pooled_prompt_embeds=A, ) SCREAMING_SNAKE_CASE : Optional[int] = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @slow @require_torch_gpu class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self, A, A="cpu", A=torch.floataa, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Optional[Any] = np.random.RandomState(A ).standard_normal((1, 4, 64, 64) ) SCREAMING_SNAKE_CASE : str = torch.from_numpy(A ).to(device=A, dtype=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = { 'prompt': 'a photograph of an astronaut riding a horse', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 
DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Optional[Any] = self.get_inputs(A ) SCREAMING_SNAKE_CASE : str = pipe(**A ).images SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE : Dict = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] ) assert np.abs(image_slice - expected_slice ).max() < 7E-3
28
1
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def lowercase__( __UpperCamelCase: Dict ,__UpperCamelCase: Optional[int] ,__UpperCamelCase: List[str] ,__UpperCamelCase: int ,__UpperCamelCase: Optional[Any]=True ,__UpperCamelCase: Dict="pt" ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = {'add_prefix_space': True} if isinstance(__UpperCamelCase ,__UpperCamelCase ) and not line.startswith(' ' ) else {} SCREAMING_SNAKE_CASE : Any = padding_side return tokenizer( [line] ,max_length=__UpperCamelCase ,padding='max_length' if pad_to_max_length else None ,truncation=__UpperCamelCase ,return_tensors=__UpperCamelCase ,add_special_tokens=__UpperCamelCase ,**__UpperCamelCase ,) def lowercase__( __UpperCamelCase: Optional[int] ,__UpperCamelCase: str ,__UpperCamelCase: List[str]=None ,): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = input_ids.ne(__UpperCamelCase ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self, A, A, A, A, A="train", A=None, A=None, A=None, A="", ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Optional[Any] = Path(A ).joinpath(type_path + '.source' ) SCREAMING_SNAKE_CASE : int = Path(A ).joinpath(type_path + '.target' ) SCREAMING_SNAKE_CASE : Optional[int] = self.get_char_lens(self.src_file ) SCREAMING_SNAKE_CASE : Any = max_source_length SCREAMING_SNAKE_CASE : str = max_target_length assert min(self.src_lens ) > 0, F"found empty line in {self.src_file}" SCREAMING_SNAKE_CASE : Any = tokenizer SCREAMING_SNAKE_CASE : Optional[Any] = prefix if n_obs is not None: SCREAMING_SNAKE_CASE : Any = self.src_lens[:n_obs] SCREAMING_SNAKE_CASE : Optional[int] = src_lang SCREAMING_SNAKE_CASE : Union[str, Any] = tgt_lang def __len__( self ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = index + 1 # linecache starts at 1 SCREAMING_SNAKE_CASE : List[str] = self.prefix + linecache.getline(str(self.src_file ), A ).rstrip('\n' ) SCREAMING_SNAKE_CASE : Tuple = linecache.getline(str(self.tgt_file ), A ).rstrip('\n' ) assert source_line, F"empty source line for index {index}" assert tgt_line, F"empty tgt line for index {index}" # Need to add eos token manually for T5 if isinstance(self.tokenizer, A ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right SCREAMING_SNAKE_CASE : Optional[Any] = ( self.tokenizer.question_encoder if isinstance(self.tokenizer, A ) else self.tokenizer ) SCREAMING_SNAKE_CASE : str = self.tokenizer.generator if isinstance(self.tokenizer, A ) else self.tokenizer SCREAMING_SNAKE_CASE : int = encode_line(A, A, self.max_source_length, 'right' ) SCREAMING_SNAKE_CASE : List[str] = encode_line(A, A, self.max_target_length, 'right' ) SCREAMING_SNAKE_CASE : Tuple = source_inputs['input_ids'].squeeze() SCREAMING_SNAKE_CASE : Dict = target_inputs['input_ids'].squeeze() SCREAMING_SNAKE_CASE : Optional[Any] = source_inputs['attention_mask'].squeeze() return { "input_ids": 
source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def UpperCamelCase_ ( A ): '''simple docstring''' return [len(A ) for x in Path(A ).open().readlines()] def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = torch.stack([x['input_ids'] for x in batch] ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack([x['attention_mask'] for x in batch] ) SCREAMING_SNAKE_CASE : Dict = torch.stack([x['decoder_input_ids'] for x in batch] ) SCREAMING_SNAKE_CASE : int = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer, A ) else self.tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE : Tuple = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer, A ) else self.tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE : Dict = trim_batch(A, A ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = trim_batch(A, A, attention_mask=A ) SCREAMING_SNAKE_CASE : List[str] = { 'input_ids': source_ids, 'attention_mask': source_mask, 'decoder_input_ids': y, } return batch UpperCamelCase_ = getLogger(__name__) def lowercase__( __UpperCamelCase: List[List] ): """simple docstring""" return list(itertools.chain.from_iterable(__UpperCamelCase ) ) def lowercase__( __UpperCamelCase: str ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = get_git_info() save_json(__UpperCamelCase ,os.path.join(__UpperCamelCase ,'git_log.json' ) ) def lowercase__( __UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Optional[int] ,__UpperCamelCase: Dict=4 ,**__UpperCamelCase: str ): """simple docstring""" with open(__UpperCamelCase ,'w' ) as f: json.dump(__UpperCamelCase ,__UpperCamelCase ,indent=__UpperCamelCase ,**__UpperCamelCase ) def lowercase__( __UpperCamelCase: List[str] ): """simple docstring""" with open(__UpperCamelCase ) as f: return json.load(__UpperCamelCase ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : int = git.Repo(search_parent_directories=__UpperCamelCase ) SCREAMING_SNAKE_CASE : Dict = { 'repo_id': str(__UpperCamelCase ), 'repo_sha': str(repo.head.object.hexsha ), 'repo_branch': str(repo.active_branch ), 'hostname': str(socket.gethostname() ), } return repo_infos def lowercase__( __UpperCamelCase: Callable ,__UpperCamelCase: Iterable ): """simple docstring""" return list(map(__UpperCamelCase ,__UpperCamelCase ) ) def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: Dict ): """simple docstring""" with open(__UpperCamelCase ,'wb' ) as f: return pickle.dump(__UpperCamelCase ,__UpperCamelCase ) def lowercase__( __UpperCamelCase: Union[str, Any] ): """simple docstring""" def remove_articles(__UpperCamelCase: str ): return re.sub(r'\b(a|an|the)\b' ,' ' ,__UpperCamelCase ) def white_space_fix(__UpperCamelCase: Tuple ): return " ".join(text.split() ) def remove_punc(__UpperCamelCase: int ): SCREAMING_SNAKE_CASE : str = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__UpperCamelCase: int ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__UpperCamelCase ) ) ) ) def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = normalize_answer(__UpperCamelCase ).split() SCREAMING_SNAKE_CASE : Any = normalize_answer(__UpperCamelCase ).split() SCREAMING_SNAKE_CASE : str = Counter(__UpperCamelCase ) & Counter(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = sum(common.values() ) if num_same == 0: return 0 SCREAMING_SNAKE_CASE : List[str] = 1.0 * 
num_same / len(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Dict = 1.0 * num_same / len(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Any = (2 * precision * recall) / (precision + recall) return fa def lowercase__( __UpperCamelCase: Any ,__UpperCamelCase: Any ): """simple docstring""" return normalize_answer(__UpperCamelCase ) == normalize_answer(__UpperCamelCase ) def lowercase__( __UpperCamelCase: List[str] ,__UpperCamelCase: List[str] ): """simple docstring""" assert len(__UpperCamelCase ) == len(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for hypo, pred in zip(__UpperCamelCase ,__UpperCamelCase ): em += exact_match_score(__UpperCamelCase ,__UpperCamelCase ) if len(__UpperCamelCase ) > 0: em /= len(__UpperCamelCase ) return {"em": em} def lowercase__( __UpperCamelCase: Union[str, Any] ): """simple docstring""" return model_prefix.startswith('rag' ) def lowercase__( __UpperCamelCase: Dict ,__UpperCamelCase: Dict ,__UpperCamelCase: Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE : int = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead SCREAMING_SNAKE_CASE : List[str] = 'dropout_rate' for p in extra_params: if getattr(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ): if not hasattr(__UpperCamelCase ,__UpperCamelCase ) and not hasattr(__UpperCamelCase ,equivalent_param[p] ): logger.info('config doesn\'t have a `{}` attribute'.format(__UpperCamelCase ) ) delattr(__UpperCamelCase ,__UpperCamelCase ) continue SCREAMING_SNAKE_CASE : Optional[Any] = p if hasattr(__UpperCamelCase ,__UpperCamelCase ) else equivalent_param[p] setattr(__UpperCamelCase ,__UpperCamelCase ,getattr(__UpperCamelCase ,__UpperCamelCase ) ) delattr(__UpperCamelCase ,__UpperCamelCase ) return hparams, config
28
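The RAG-utilities row above computes a token-level F1 between normalized answers. A self-contained sketch of that metric, following the same normalize-then-Counter-intersection logic as the row (hypothetical function names, same arithmetic):

```python
import re
import string
from collections import Counter

def normalize_answer(s: str) -> str:
    # lower, strip punctuation, drop articles, collapse whitespace, as in the row above
    s = "".join(ch for ch in s.lower() if ch not in set(string.punctuation))
    s = re.sub(r"\b(a|an|the)\b", " ", s)
    return " ".join(s.split())

def f1_score(prediction: str, ground_truth: str) -> float:
    pred = normalize_answer(prediction).split()
    gold = normalize_answer(ground_truth).split()
    common = Counter(pred) & Counter(gold)  # multiset intersection of tokens
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred)
    recall = num_same / len(gold)
    return 2 * precision * recall / (precision + recall)

print(f1_score("The cat sat.", "a cat sat down"))  # 2 shared tokens -> 0.8
```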
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Dict = '''char''' A : Any = '''bpe''' A : Dict = '''wp''' UpperCamelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[Any] = ['''image_processor''', '''char_tokenizer'''] A : int = '''ViTImageProcessor''' A : List[str] = '''MgpstrTokenizer''' def __init__( self, A=None, A=None, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.', A, ) SCREAMING_SNAKE_CASE : str = kwargs.pop('feature_extractor' ) SCREAMING_SNAKE_CASE : Optional[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained('gpt2' ) SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-uncased' ) super().__init__(A, A ) def __call__( self, A=None, A=None, A=None, **A ): '''simple docstring''' if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' ) if images is not None: SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor(A, return_tensors=A, **A ) if text is not None: SCREAMING_SNAKE_CASE : int = self.char_tokenizer(A, return_tensors=A, **A ) if text is None: return inputs elif images is None: return encodings else: SCREAMING_SNAKE_CASE : Any = encodings['input_ids'] return inputs def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = sequences SCREAMING_SNAKE_CASE : List[str] = char_preds.size(0 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self._decode_helper(A, 'char' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self._decode_helper(A, 'bpe' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._decode_helper(A, 'wp' ) SCREAMING_SNAKE_CASE : Optional[Any] = [] SCREAMING_SNAKE_CASE : Tuple = [] for i in range(A ): SCREAMING_SNAKE_CASE : str = [char_scores[i], bpe_scores[i], wp_scores[i]] SCREAMING_SNAKE_CASE : Dict = [char_strs[i], bpe_strs[i], wp_strs[i]] SCREAMING_SNAKE_CASE : List[str] = scores.index(max(A ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) SCREAMING_SNAKE_CASE : List[Any] = {} SCREAMING_SNAKE_CASE : int = final_strs SCREAMING_SNAKE_CASE : Any = final_scores SCREAMING_SNAKE_CASE : Dict = char_strs SCREAMING_SNAKE_CASE : Any = bpe_strs SCREAMING_SNAKE_CASE : Union[str, Any] = wp_strs return out def UpperCamelCase_ ( self, A, A ): '''simple docstring''' if format == DecodeType.CHARACTER: SCREAMING_SNAKE_CASE : List[Any] = self.char_decode SCREAMING_SNAKE_CASE : Optional[int] = 1 SCREAMING_SNAKE_CASE : str = '[s]' elif format == DecodeType.BPE: SCREAMING_SNAKE_CASE : str = self.bpe_decode SCREAMING_SNAKE_CASE : str = 2 SCREAMING_SNAKE_CASE : List[str] = '#' elif format == 
DecodeType.WORDPIECE: SCREAMING_SNAKE_CASE : Any = self.wp_decode SCREAMING_SNAKE_CASE : Tuple = 102 SCREAMING_SNAKE_CASE : List[Any] = '[SEP]' else: raise ValueError(F"Format {format} is not supported." ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = [], [] SCREAMING_SNAKE_CASE : Union[str, Any] = pred_logits.size(0 ) SCREAMING_SNAKE_CASE : Any = pred_logits.size(1 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = pred_logits.topk(1, dim=-1, largest=A, sorted=A ) SCREAMING_SNAKE_CASE : Optional[int] = preds_index.view(-1, A )[:, 1:] SCREAMING_SNAKE_CASE : List[Any] = decoder(A ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.softmax(A, dim=2 ).max(dim=2 ) SCREAMING_SNAKE_CASE : Dict = preds_max_prob[:, 1:] for index in range(A ): SCREAMING_SNAKE_CASE : Optional[int] = preds_str[index].find(A ) SCREAMING_SNAKE_CASE : List[Any] = preds_str[index][:pred_eos] SCREAMING_SNAKE_CASE : Dict = preds_index[index].cpu().tolist() SCREAMING_SNAKE_CASE : Union[str, Any] = pred_index.index(A ) if eos_token in pred_index else -1 SCREAMING_SNAKE_CASE : Optional[int] = preds_max_prob[index][: pred_eos_index + 1] SCREAMING_SNAKE_CASE : Optional[int] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(A ) conf_scores.append(A ) return dec_strs, conf_scores def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [seq.replace(' ', '' ) for seq in self.char_tokenizer.batch_decode(A )] return decode_strs def UpperCamelCase_ ( self, A ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [seq.replace(' ', '' ) for seq in self.wp_tokenizer.batch_decode(A )] return decode_strs
28
1
'''simple docstring'''
import unittest

from knapsack import greedy_knapsack as kp


class _a ( unittest.TestCase ):
    '''simple docstring'''

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : int = [10, 20, 30, 40, 50, 60]
        SCREAMING_SNAKE_CASE : Union[str, Any] = [2, 4, 6, 8, 10, 12]
        SCREAMING_SNAKE_CASE : Any = 100
        self.assertEqual(kp.calc_profit(A, A, A ), 210 )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        self.assertRaisesRegex(A, 'max_weight must greater than zero.' )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        self.assertRaisesRegex(A, 'Weight can not be negative.' )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        self.assertRaisesRegex(A, 'Profit can not be negative.' )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        self.assertRaisesRegex(A, 'max_weight must greater than zero.' )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        self.assertRaisesRegex( A, 'The length of profit and weight must be same.' )


if __name__ == "__main__":
    unittest.main()
28
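The test row above expects `calc_profit` on those inputs to return 210 and to reject bad arguments with the quoted messages. A minimal sketch of a `calc_profit`-style greedy fractional knapsack, a hypothetical reimplementation rather than the `knapsack.greedy_knapsack` module the test imports (the exception types are a guess; the messages mirror the test's regexes):

```python
def calc_profit(profit, weight, max_weight):
    """Greedy fractional knapsack: take items by descending profit/weight ratio."""
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if len(profit) != len(weight):
        raise IndexError("The length of profit and weight must be same.")

    order = sorted(range(len(profit)), key=lambda i: profit[i] / weight[i], reverse=True)
    total, capacity = 0.0, max_weight
    for i in order:
        if weight[i] <= capacity:  # take the whole item
            capacity -= weight[i]
            total += profit[i]
        else:                      # take only the fraction that still fits
            total += profit[i] * capacity / weight[i]
            break
    return total

# All six items (total weight 42) fit in capacity 100 -> 210, matching the test.
assert calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210
```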
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger("transformers.models.speecht5") def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: Any ): """simple docstring""" hf_model.apply_weight_norm() SCREAMING_SNAKE_CASE : Any = checkpoint['input_conv.weight_g'] SCREAMING_SNAKE_CASE : List[Any] = checkpoint['input_conv.weight_v'] SCREAMING_SNAKE_CASE : str = checkpoint['input_conv.bias'] for i in range(len(config.upsample_rates ) ): SCREAMING_SNAKE_CASE : Optional[int] = checkpoint[f"upsamples.{i}.1.weight_g"] SCREAMING_SNAKE_CASE : Dict = checkpoint[f"upsamples.{i}.1.weight_v"] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"upsamples.{i}.1.bias"] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): SCREAMING_SNAKE_CASE : int = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"] SCREAMING_SNAKE_CASE : str = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"] SCREAMING_SNAKE_CASE : Dict = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"] SCREAMING_SNAKE_CASE : Tuple = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"] SCREAMING_SNAKE_CASE : Optional[Any] = checkpoint['output_conv.1.weight_g'] SCREAMING_SNAKE_CASE : List[Any] = checkpoint['output_conv.1.weight_v'] SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint['output_conv.1.bias'] hf_model.remove_weight_norm() @torch.no_grad() def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: int ,__UpperCamelCase: Any ,__UpperCamelCase: str=None ,__UpperCamelCase: Tuple=None ,): """simple docstring""" if config_path is not None: SCREAMING_SNAKE_CASE : List[Any] = SpeechTaHifiGanConfig.from_pretrained(__UpperCamelCase ) else: SCREAMING_SNAKE_CASE : Optional[int] = SpeechTaHifiGanConfig() SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaHifiGan(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.load(__UpperCamelCase ) load_weights(orig_checkpoint['model']['generator'] ,__UpperCamelCase ,__UpperCamelCase ) SCREAMING_SNAKE_CASE : int = np.load(__UpperCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = stats[0].reshape(-1 ) SCREAMING_SNAKE_CASE : Tuple = stats[1].reshape(-1 ) SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(__UpperCamelCase ).float() SCREAMING_SNAKE_CASE : Optional[Any] = torch.from_numpy(__UpperCamelCase ).float() model.save_pretrained(__UpperCamelCase ) if repo_id: print('Pushing to the hub...' ) model.push_to_hub(__UpperCamelCase ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." 
) UpperCamelCase_ = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
28
1
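The conversion row above copies `weight_g` / `weight_v` tensors while weight norm is applied, then calls `remove_weight_norm` to fuse them back into plain weights. A minimal PyTorch illustration of that mechanism, with arbitrarily chosen layer shapes:

```python
import torch
from torch import nn

conv = nn.Conv1d(80, 512, kernel_size=7)
conv = nn.utils.weight_norm(conv)  # splits .weight into .weight_g and .weight_v
print(hasattr(conv, "weight_g"), hasattr(conv, "weight_v"))  # True True

nn.utils.remove_weight_norm(conv)  # fuses weight_g / weight_v back into .weight
print(hasattr(conv, "weight_g"))   # False
```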
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class _a ( unittest.TestCase ): '''simple docstring''' def __init__( self, A, A=7, A=3, A=10, A=18, A=30, A=400, A=True, A=None, A=True, A=[0.5, 0.5, 0.5], A=[0.5, 0.5, 0.5], A=None, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = size if size is not None else {'shortest_edge': 18} SCREAMING_SNAKE_CASE : Dict = crop_size if crop_size is not None else {'height': 18, 'width': 18} SCREAMING_SNAKE_CASE : Optional[Any] = parent SCREAMING_SNAKE_CASE : Dict = batch_size SCREAMING_SNAKE_CASE : Dict = num_channels SCREAMING_SNAKE_CASE : int = num_frames SCREAMING_SNAKE_CASE : Dict = image_size SCREAMING_SNAKE_CASE : Dict = min_resolution SCREAMING_SNAKE_CASE : Optional[Any] = max_resolution SCREAMING_SNAKE_CASE : int = do_resize SCREAMING_SNAKE_CASE : str = size SCREAMING_SNAKE_CASE : str = do_normalize SCREAMING_SNAKE_CASE : Union[str, Any] = image_mean SCREAMING_SNAKE_CASE : Any = image_std SCREAMING_SNAKE_CASE : Tuple = crop_size def UpperCamelCase_ ( self ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Union[str, Any] = VivitImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = VivitImageProcessingTester(self ) @property def UpperCamelCase_ ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A, 'image_mean' ) ) self.assertTrue(hasattr(A, 'image_std' ) ) self.assertTrue(hasattr(A, 'do_normalize' ) ) self.assertTrue(hasattr(A, 'do_resize' ) ) self.assertTrue(hasattr(A, 'do_center_crop' ) ) self.assertTrue(hasattr(A, 'size' ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'shortest_edge': 18} ) self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18} ) SCREAMING_SNAKE_CASE : Any = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 ) self.assertEqual(image_processor.size, {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84} ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos SCREAMING_SNAKE_CASE : Tuple = prepare_video_inputs(self.image_processor_tester, equal_resolution=A ) for video in video_inputs: self.assertIsInstance(A, A ) self.assertIsInstance(video[0], Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE : int = image_processing(video_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape, ( 1, 
self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = image_processing(A, return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE : Any = prepare_video_inputs(self.image_processor_tester, equal_resolution=A, numpify=A ) for video in video_inputs: self.assertIsInstance(A, A ) self.assertIsInstance(video[0], np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE : Any = image_processing(video_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) # Test batched SCREAMING_SNAKE_CASE : str = image_processing(A, return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : Tuple = prepare_video_inputs(self.image_processor_tester, equal_resolution=A, torchify=A ) for video in video_inputs: self.assertIsInstance(A, A ) self.assertIsInstance(video[0], torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE : List[Any] = image_processing(video_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) # Test batched SCREAMING_SNAKE_CASE : Dict = image_processing(A, return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), )
28
'''simple docstring''' from typing import Any class _a : '''simple docstring''' def __init__( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = data SCREAMING_SNAKE_CASE : Any = None def __repr__( self ): '''simple docstring''' return F"Node({self.data})" class _a : '''simple docstring''' def __init__( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = None def __iter__( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.head while node: yield node.data SCREAMING_SNAKE_CASE : List[str] = node.next def __len__( self ): '''simple docstring''' return sum(1 for _ in self ) def __repr__( self ): '''simple docstring''' return "->".join([str(A ) for item in self] ) def __getitem__( self, A ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self, A, A ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) SCREAMING_SNAKE_CASE : Optional[Any] = self.head for _ in range(A ): SCREAMING_SNAKE_CASE : Union[str, Any] = current.next SCREAMING_SNAKE_CASE : Any = data def UpperCamelCase_ ( self, A ): '''simple docstring''' self.insert_nth(len(self ), A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' self.insert_nth(0, A ) def UpperCamelCase_ ( self, A, A ): '''simple docstring''' if not 0 <= index <= len(self ): raise IndexError('list index out of range' ) SCREAMING_SNAKE_CASE : Union[str, Any] = Node(A ) if self.head is None: SCREAMING_SNAKE_CASE : Optional[int] = new_node elif index == 0: SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # link new_node to head SCREAMING_SNAKE_CASE : Tuple = new_node else: SCREAMING_SNAKE_CASE : Optional[int] = self.head for _ in range(index - 1 ): SCREAMING_SNAKE_CASE : str = temp.next SCREAMING_SNAKE_CASE : Union[str, Any] = temp.next SCREAMING_SNAKE_CASE : List[str] = new_node def UpperCamelCase_ ( self ): # print every node data '''simple docstring''' print(self ) def UpperCamelCase_ ( self ): '''simple docstring''' return self.delete_nth(0 ) def UpperCamelCase_ ( self ): # delete from tail '''simple docstring''' return self.delete_nth(len(self ) - 1 ) def UpperCamelCase_ ( self, A = 0 ): '''simple docstring''' if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('List index out of range.' ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # default first node if index == 0: SCREAMING_SNAKE_CASE : List[str] = self.head.next else: SCREAMING_SNAKE_CASE : Union[str, Any] = self.head for _ in range(index - 1 ): SCREAMING_SNAKE_CASE : Any = temp.next SCREAMING_SNAKE_CASE : List[str] = temp.next SCREAMING_SNAKE_CASE : Optional[int] = temp.next.next return delete_node.data def UpperCamelCase_ ( self ): '''simple docstring''' return self.head is None def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = None SCREAMING_SNAKE_CASE : Any = self.head while current: # Store the current node's next node. 
SCREAMING_SNAKE_CASE : Optional[int] = current.next # Make the current node's next point backwards SCREAMING_SNAKE_CASE : int = prev # Make the previous node be the current node SCREAMING_SNAKE_CASE : int = current # Make the current node the next node (to progress iteration) SCREAMING_SNAKE_CASE : List[Any] = next_node # Return prev in order to put the head at the end SCREAMING_SNAKE_CASE : List[Any] = prev def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = LinkedList() assert linked_list.is_empty() is True assert str(__UpperCamelCase ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(10 ): assert len(__UpperCamelCase ) == i linked_list.insert_nth(__UpperCamelCase ,i + 1 ) assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(0 ,12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(__UpperCamelCase ) == 9 assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 ,9 ) ) is True for i in range(0 ,9 ): SCREAMING_SNAKE_CASE : Any = -i assert all(linked_list[i] == -i for i in range(0 ,9 ) ) is True linked_list.reverse() assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(-8 ,1 ) ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [ -9, 1_00, Node(77_34_51_12 ), 'dlrow olleH', 7, 55_55, 0, -1_9_2.5_5_5_5_5, 'Hello, world!', 7_7.9, Node(10 ), None, None, 1_2.2_0, ] SCREAMING_SNAKE_CASE : Optional[int] = LinkedList() for i in test_input: linked_list.insert_tail(__UpperCamelCase ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(__UpperCamelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head SCREAMING_SNAKE_CASE : str = linked_list.delete_head() assert result == -9 assert ( str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail SCREAMING_SNAKE_CASE : Dict = linked_list.delete_tail() assert result == 1_2.2 assert ( str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list SCREAMING_SNAKE_CASE : str = linked_list.delete_nth(10 ) assert result is None assert ( str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('Hello again, world!' 
) ) assert ( str(__UpperCamelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(__UpperCamelCase ) assert ( str(__UpperCamelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(__UpperCamelCase ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def lowercase__( ): """simple docstring""" from doctest import testmod testmod() SCREAMING_SNAKE_CASE : Dict = LinkedList() linked_list.insert_head(input('Inserting 1st at head ' ).strip() ) linked_list.insert_head(input('Inserting 2nd at head ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() ) linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() print('\nDelete head' ) linked_list.delete_head() print('Delete tail' ) linked_list.delete_tail() print('\nPrint list:' ) linked_list.print_list() print('\nReverse linked list' ) linked_list.reverse() print('\nPrint list:' ) linked_list.print_list() print('\nString representation of linked list:' ) print(__UpperCamelCase ) print('\nReading/changing Node data using indexing:' ) print(f"Element at Position 1: {linked_list[1]}" ) SCREAMING_SNAKE_CASE : str = input('Enter New Value: ' ).strip() print('New list:' ) print(__UpperCamelCase ) print(f"length of linked_list is : {len(__UpperCamelCase )}" ) if __name__ == "__main__": main()
28
1
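The linked-list row above reverses the list in place by re-pointing each node backwards. A stripped-down sketch of that same iterative reversal, outside the mangled class:

```python
class Node:
    def __init__(self, data):
        self.data = data
        self.next = None

def reverse(head):
    """Iterative in-place reversal, as in the LinkedList.reverse method above."""
    prev = None
    current = head
    while current:
        next_node = current.next  # remember the rest of the list
        current.next = prev       # point the current node backwards
        prev = current            # the previous node becomes the current one
        current = next_node       # advance to the next node
    return prev                   # the old tail is the new head

# Build 1 -> 2 -> 3, reverse it, and walk it: prints 3, 2, 1.
head = Node(1); head.next = Node(2); head.next.next = Node(3)
node = reverse(head)
while node:
    print(node.data)
    node = node.next
```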
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import _LazyModule

UpperCamelCase_ = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
    import sys

    UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
28
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class _a ( unittest.TestCase ): '''simple docstring''' def __init__( self, A, A=7, A=3, A=30, A=400, A=True, A=None, A=True, A=[0.5, 0.5, 0.5], A=[0.5, 0.5, 0.5], A=True, A=1 / 255, A=True, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333} SCREAMING_SNAKE_CASE : List[Any] = parent SCREAMING_SNAKE_CASE : Dict = batch_size SCREAMING_SNAKE_CASE : int = num_channels SCREAMING_SNAKE_CASE : Tuple = min_resolution SCREAMING_SNAKE_CASE : int = max_resolution SCREAMING_SNAKE_CASE : Tuple = do_resize SCREAMING_SNAKE_CASE : Tuple = size SCREAMING_SNAKE_CASE : Any = do_normalize SCREAMING_SNAKE_CASE : Optional[int] = image_mean SCREAMING_SNAKE_CASE : Union[str, Any] = image_std SCREAMING_SNAKE_CASE : Optional[int] = do_rescale SCREAMING_SNAKE_CASE : int = rescale_factor SCREAMING_SNAKE_CASE : List[str] = do_pad def UpperCamelCase_ ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCamelCase_ ( self, A, A=False ): '''simple docstring''' if not batched: SCREAMING_SNAKE_CASE : List[Any] = image_inputs[0] if isinstance(A, Image.Image ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = image.size else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = image.shape[1], image.shape[2] if w < h: SCREAMING_SNAKE_CASE : int = int(self.size['shortest_edge'] * h / w ) SCREAMING_SNAKE_CASE : int = self.size['shortest_edge'] elif w > h: SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge'] SCREAMING_SNAKE_CASE : Dict = int(self.size['shortest_edge'] * w / h ) else: SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge'] SCREAMING_SNAKE_CASE : int = self.size['shortest_edge'] else: SCREAMING_SNAKE_CASE : Union[str, Any] = [] for image in image_inputs: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) SCREAMING_SNAKE_CASE : Union[str, Any] = max(A, key=lambda A : item[0] )[0] SCREAMING_SNAKE_CASE : str = max(A, key=lambda A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : List[Any] = YolosImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = YolosImageProcessingTester(self ) @property def UpperCamelCase_ ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A, 'image_mean' ) ) self.assertTrue(hasattr(A, 'image_std' ) ) self.assertTrue(hasattr(A, 'do_normalize' ) ) self.assertTrue(hasattr(A, 'do_resize' ) ) 
self.assertTrue(hasattr(A, 'size' ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1_333} ) self.assertEqual(image_processor.do_pad, A ) SCREAMING_SNAKE_CASE : str = self.image_processing_class.from_dict( self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=A ) self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad, A ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A, Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.get_expected_values(A, batched=A ) SCREAMING_SNAKE_CASE : Tuple = image_processing(A, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, numpify=A ) for image in image_inputs: self.assertIsInstance(A, np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(A, return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor_tester.get_expected_values(A, batched=A ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A ) for image in image_inputs: self.assertIsInstance(A, torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, 
expected_height, expected_width), ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = image_processing(A, return_tensors='pt' ).pixel_values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(A, batched=A ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(do_resize=A, do_normalize=A, do_rescale=A ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A ) for image in image_inputs: self.assertIsInstance(A, torch.Tensor ) # Test whether the method "pad" and calling the image processor return the same tensors SCREAMING_SNAKE_CASE : List[str] = image_processing_a.pad(A, return_tensors='pt' ) SCREAMING_SNAKE_CASE : Dict = image_processing_a(A, return_tensors='pt' ) self.assertTrue( torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r' ) as f: SCREAMING_SNAKE_CASE : Dict = json.loads(f.read() ) SCREAMING_SNAKE_CASE : Any = {'image_id': 39_769, 'annotations': target} # encode them SCREAMING_SNAKE_CASE : Any = YolosImageProcessor.from_pretrained('hustvl/yolos-small' ) SCREAMING_SNAKE_CASE : int = image_processing(images=A, annotations=A, return_tensors='pt' ) # verify pixel values SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['pixel_values'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) ) # verify area SCREAMING_SNAKE_CASE : Tuple = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) ) # verify boxes SCREAMING_SNAKE_CASE : str = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) ) # verify image_id SCREAMING_SNAKE_CASE : Tuple = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) ) # verify is_crowd SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) ) # verify class_labels SCREAMING_SNAKE_CASE : int = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) ) # verify orig_size SCREAMING_SNAKE_CASE : Tuple = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) ) # verify size SCREAMING_SNAKE_CASE : str = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r' ) as f: SCREAMING_SNAKE_CASE : int = json.loads(f.read() ) SCREAMING_SNAKE_CASE : List[Any] = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target} SCREAMING_SNAKE_CASE : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them SCREAMING_SNAKE_CASE : int = YolosImageProcessor(format='coco_panoptic' ) SCREAMING_SNAKE_CASE : str = image_processing(images=A, annotations=A, masks_path=A, return_tensors='pt' ) # verify pixel values SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['pixel_values'].shape, A ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) ) # verify area SCREAMING_SNAKE_CASE : Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) ) # verify boxes SCREAMING_SNAKE_CASE : Optional[int] = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape, A ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) ) # verify image_id SCREAMING_SNAKE_CASE : List[str] = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) ) # verify is_crowd SCREAMING_SNAKE_CASE : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) ) # verify class_labels SCREAMING_SNAKE_CASE : Any = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) ) # verify masks SCREAMING_SNAKE_CASE : Optional[int] = 822_873 self.assertEqual(encoding['labels'][0]['masks'].sum().item(), A ) # verify orig_size SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) ) # verify size SCREAMING_SNAKE_CASE : Tuple = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
28
1
'''simple docstring''' import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput UpperCamelCase_ = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self, *A, A=None, A=None, A=None, **A ): '''simple docstring''' super().__init__(*A, **A ) SCREAMING_SNAKE_CASE : Optional[Any] = eval_examples SCREAMING_SNAKE_CASE : List[Any] = post_process_function SCREAMING_SNAKE_CASE : List[Any] = quant_trainer_args SCREAMING_SNAKE_CASE : Tuple = 128 # default number of calibration samples def UpperCamelCase_ ( self, A=None ): '''simple docstring''' if calib_dataset is None and self.calib_dataset is None: raise ValueError('Trainer: calibration requires an calib_dataset.' ) SCREAMING_SNAKE_CASE : List[str] = calib_dataset if calib_dataset is not None else self.calib_dataset SCREAMING_SNAKE_CASE : Tuple = self._remove_unused_columns(A, description='Calibration' ) return DataLoader( A, batch_size=self.args.eval_batch_size, collate_fn=self.data_collator, drop_last=self.args.dataloader_drop_last, num_workers=self.args.dataloader_num_workers, pin_memory=self.args.dataloader_pin_memory, shuffle=A, ) def UpperCamelCase_ ( self, A=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.train_dataset if calib_dataset is None else calib_dataset SCREAMING_SNAKE_CASE : Any = self.get_calib_dataloader(A ) SCREAMING_SNAKE_CASE : List[Any] = self.model quant_trainer.configure_model(A, self.quant_trainer_args, calib=A ) model.eval() quant_trainer.enable_calibration(A ) logger.info('***** Running calibration *****' ) logger.info(F" Num examples = {self.calib_num}" ) logger.info(F" Batch size = {calib_dataloader.batch_size}" ) for step, inputs in enumerate(A ): # Prediction step SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.prediction_step(A, A, prediction_loss_only=A ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(A, self.quant_trainer_args ) SCREAMING_SNAKE_CASE : List[Any] = model def UpperCamelCase_ ( self, A=None, A=None, A=None, A = "eval" ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.eval_dataset if eval_dataset is None else eval_dataset SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_eval_dataloader(A ) SCREAMING_SNAKE_CASE : Dict = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. 
SCREAMING_SNAKE_CASE : int = self.compute_metrics SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : List[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: SCREAMING_SNAKE_CASE : List[str] = eval_loop( A, description='Evaluation', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=A, ) finally: SCREAMING_SNAKE_CASE : List[str] = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: SCREAMING_SNAKE_CASE : Tuple = self.post_process_function(A, A, output.predictions ) SCREAMING_SNAKE_CASE : Optional[int] = self.compute_metrics(A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"{metric_key_prefix}_" ): SCREAMING_SNAKE_CASE : List[Any] = metrics.pop(A ) self.log(A ) else: SCREAMING_SNAKE_CASE : Optional[int] = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) SCREAMING_SNAKE_CASE : Dict = self.callback_handler.on_evaluate(self.args, self.state, self.control, A ) return metrics def UpperCamelCase_ ( self, A, A, A=None, A = "test" ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.get_test_dataloader(A ) # Temporarily disable metric computation, we will do it in the loop here. SCREAMING_SNAKE_CASE : List[str] = self.compute_metrics SCREAMING_SNAKE_CASE : str = None SCREAMING_SNAKE_CASE : Dict = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: SCREAMING_SNAKE_CASE : int = eval_loop( A, description='Prediction', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=A, ) finally: SCREAMING_SNAKE_CASE : Tuple = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output SCREAMING_SNAKE_CASE : int = self.post_process_function(A, A, output.predictions, 'predict' ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.compute_metrics(A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"{metric_key_prefix}_" ): SCREAMING_SNAKE_CASE : Optional[int] = metrics.pop(A ) return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=A ) def UpperCamelCase_ ( self, A="./" ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.eval_dataset SCREAMING_SNAKE_CASE : int = self.get_eval_dataloader(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = next(iter(A ) ) # saving device - to make it consistent SCREAMING_SNAKE_CASE : Optional[Any] = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) # convert to tuple SCREAMING_SNAKE_CASE : Optional[int] = tuple(v.to(A ) for k, v in batch.items() ) logger.info('Converting model to be onnx compatible' ) from pytorch_quantization.nn import TensorQuantizer SCREAMING_SNAKE_CASE : List[str] = True SCREAMING_SNAKE_CASE : Dict = self.model.to(A ) model.eval() model.float() SCREAMING_SNAKE_CASE : int = model.module if hasattr(A, 'module' ) else model quant_trainer.configure_model(A, self.quant_trainer_args ) SCREAMING_SNAKE_CASE : Any = os.path.join(A, 'model.onnx' ) logger.info(F"exporting model to {output_model_file}" ) SCREAMING_SNAKE_CASE : List[str] = {0: 'batch_size', 1: 'seq_len'} torch.onnx.export( A, A, A, export_params=A, opset_version=13, do_constant_folding=A, input_names=['input_ids', 'attention_mask', 'token_type_ids'], 
output_names=['output_start_logits', 'output_end_logits'], dynamic_axes={ 'input_ids': axes, 'attention_mask': axes, 'token_type_ids': axes, 'output_start_logits': axes, 'output_end_logits': axes, }, verbose=A, ) logger.info('onnx export finished' )
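For reference, a minimal self-contained sketch of the same `torch.onnx.export` pattern the trainer above uses (opset 13, dynamic batch and sequence axes) on a toy module; the module, output name, and file name here are illustrative, not part of the sample.

import torch


class ToyEncoder(torch.nn.Module):
    def forward(self, input_ids):
        # Stand-in for a real model: reduce over the sequence axis.
        return input_ids.float().mean(dim=1)


# Mark batch and sequence dimensions as dynamic, as in the export above.
axes = {0: "batch_size", 1: "seq_len"}
torch.onnx.export(
    ToyEncoder(),
    (torch.ones(2, 8, dtype=torch.long),),
    "toy.onnx",
    export_params=True,
    opset_version=13,
    do_constant_folding=True,
    input_names=["input_ids"],
    output_names=["pooled"],
    dynamic_axes={"input_ids": axes, "pooled": {0: "batch_size"}},
)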
28
'''simple docstring'''
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal

UpperCamelCase_ = logging.get_logger(__name__)

UpperCamelCase_ = TypeVar("DatasetType", Dataset, IterableDataset)


def lowercase__( __UpperCamelCase: List[DatasetType] ,__UpperCamelCase: Optional[List[float]] = None ,__UpperCamelCase: Optional[int] = None ,__UpperCamelCase: Optional[DatasetInfo] = None ,__UpperCamelCase: Optional[NamedSplit] = None ,__UpperCamelCase: Literal["first_exhausted", "all_exhausted"] = "first_exhausted" ,):
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.' )
    for i, dataset in enumerate(__UpperCamelCase ):
        if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ):
            if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        'is an empty dataset dictionary.'
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}."
            )
        if i == 0:
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = (
                (Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset)
            )
        elif not isinstance(__UpperCamelCase ,__UpperCamelCase ):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase )
    else:
        return _interleave_iterable_datasets(
            __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase )


def lowercase__( __UpperCamelCase: List[DatasetType] ,__UpperCamelCase: Optional[DatasetInfo] = None ,__UpperCamelCase: Optional[NamedSplit] = None ,__UpperCamelCase: int = 0 ,):
    """simple docstring"""
    if not dsets:
        raise ValueError('Unable to concatenate an empty list of datasets.' )
    for i, dataset in enumerate(__UpperCamelCase ):
        if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ):
            if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        'is an empty dataset dictionary.'
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}."
            )
        if i == 0:
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = (
                (Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset)
            )
        elif not isinstance(__UpperCamelCase ,__UpperCamelCase ):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase )
    else:
        return _concatenate_iterable_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase )
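A minimal usage sketch of the public `datasets` entry points these two helpers implement (`interleave_datasets` and `concatenate_datasets`); the toy column values are illustrative only.

from datasets import Dataset, concatenate_datasets, interleave_datasets

ds_a = Dataset.from_dict({"text": ["a1", "a2", "a3"]})
ds_b = Dataset.from_dict({"text": ["b1", "b2", "b3"]})

# Randomly alternate between sources; stop when the first one runs out.
mixed = interleave_datasets([ds_a, ds_b], probabilities=[0.5, 0.5], seed=42)

# Append the rows of ds_b after those of ds_a (axis=0).
combined = concatenate_datasets([ds_a, ds_b])
print(mixed["text"], combined["text"])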
28
1
'''simple docstring'''
from collections.abc import Callable


def lowercase__( __UpperCamelCase: Callable[[float], float] ,__UpperCamelCase: float ,__UpperCamelCase: float ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : float = a
    SCREAMING_SNAKE_CASE : float = b
    if function(__UpperCamelCase ) == 0:  # one of the a or b is a root for the function
        return a
    elif function(__UpperCamelCase ) == 0:
        return b
    elif (
        function(__UpperCamelCase ) * function(__UpperCamelCase ) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.' )
    else:
        SCREAMING_SNAKE_CASE : float = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # until precisely equals to 10^-7
            if function(__UpperCamelCase ) == 0:
                return mid
            elif function(__UpperCamelCase ) * function(__UpperCamelCase ) < 0:
                SCREAMING_SNAKE_CASE : Dict = mid
            else:
                SCREAMING_SNAKE_CASE : List[Any] = mid
            SCREAMING_SNAKE_CASE : Dict = start + (end - start) / 2.0
        return mid


def lowercase__( __UpperCamelCase: float ):
    """simple docstring"""
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1_0_0_0))

    import doctest

    doctest.testmod()
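A short usage sketch of the routine above on a second function, assuming it is exposed as `bisection` as the `__main__` block suggests; the bracket [1, 2] is chosen so the endpoint signs differ.

def g(x: float) -> float:
    return x * x - 2  # sign change on [1, 2], root at sqrt(2)


print(bisection(g, 1, 2))  # ~1.4142135; the interval is halved until its width is below 1e-7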
28
'''simple docstring''' import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A, 'hidden_sizes' ) ) self.parent.assertTrue(hasattr(A, 'neck_hidden_sizes' ) ) self.parent.assertTrue(hasattr(A, 'num_attention_heads' ) ) class _a : '''simple docstring''' def __init__( self, A, A=13, A=32, A=2, A=3, A=640, A=4, A="silu", A=3, A=32, A=0.1, A=0.1, A=0.1, A=0.02, A=True, A=True, A=10, A=None, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : int = image_size SCREAMING_SNAKE_CASE : str = patch_size SCREAMING_SNAKE_CASE : Tuple = num_channels SCREAMING_SNAKE_CASE : int = last_hidden_size SCREAMING_SNAKE_CASE : Any = num_attention_heads SCREAMING_SNAKE_CASE : List[Any] = hidden_act SCREAMING_SNAKE_CASE : Optional[int] = conv_kernel_size SCREAMING_SNAKE_CASE : Optional[Any] = output_stride SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = classifier_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = use_labels SCREAMING_SNAKE_CASE : int = is_training SCREAMING_SNAKE_CASE : Dict = num_labels SCREAMING_SNAKE_CASE : Dict = initializer_range SCREAMING_SNAKE_CASE : Optional[int] = scope def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Dict = None if self.use_labels: SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size], self.num_labels ) SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) SCREAMING_SNAKE_CASE : int = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self ): '''simple docstring''' return MobileViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = MobileViTModel(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Optional[int] = model(A ) self.parent.assertEqual( result.last_hidden_state.shape, ( 
self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.num_labels SCREAMING_SNAKE_CASE : Tuple = MobileViTForImageClassification(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : List[str] = model(A, labels=A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels SCREAMING_SNAKE_CASE : str = MobileViTForSemanticSegmentation(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : str = model(A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) SCREAMING_SNAKE_CASE : int = model(A, labels=A ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs SCREAMING_SNAKE_CASE : str = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Tuple = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) A : List[Any] = ( { '''feature-extraction''': MobileViTModel, '''image-classification''': MobileViTForImageClassification, '''image-segmentation''': MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) A : Optional[int] = False A : Dict = False A : List[Any] = False A : Optional[int] = False def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = MobileViTModelTester(self ) SCREAMING_SNAKE_CASE : str = MobileViTConfigTester(self, config_class=A, has_text_modality=A ) def UpperCamelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='MobileViT does not use inputs_embeds' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViT does not support input and output embeddings' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViT does not output attentions' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A ) SCREAMING_SNAKE_CASE : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE : Any = ['pixel_values'] self.assertListEqual(arg_names[:1], A ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' def check_hidden_states_output(A, A, A ): SCREAMING_SNAKE_CASE : Any = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(A, A ) ) SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states SCREAMING_SNAKE_CASE : List[str] = 5 self.assertEqual(len(A ), A ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. SCREAMING_SNAKE_CASE : int = 2 for i in range(len(A ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], ) divisor *= 2 self.assertEqual(self.model_tester.output_stride, divisor // 2 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Tuple = True check_hidden_states_output(A, A, A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE : Optional[Any] = True check_hidden_states_output(A, A, A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*A ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : int = MobileViTModel.from_pretrained(A ) self.assertIsNotNone(A ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class _a ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): '''simple docstring''' return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(A ) SCREAMING_SNAKE_CASE : Any = self.default_image_processor SCREAMING_SNAKE_CASE : Dict = prepare_img() SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Tuple = model(**A ) # verify the logits SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape, A ) SCREAMING_SNAKE_CASE : int = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(A ) self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : Optional[Any] = model.to(A ) SCREAMING_SNAKE_CASE : Optional[int] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) 
SCREAMING_SNAKE_CASE : str = prepare_img() SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Dict = model(**A ) SCREAMING_SNAKE_CASE : List[str] = outputs.logits # verify the logits SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape, A ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor( [ [[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]], [[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]], [[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]], ], device=A, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], A, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : List[str] = model.to(A ) SCREAMING_SNAKE_CASE : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img() SCREAMING_SNAKE_CASE : Any = image_processor(images=A, return_tensors='pt' ).to(A ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Optional[Any] = model(**A ) SCREAMING_SNAKE_CASE : int = outputs.logits.detach().cpu() SCREAMING_SNAKE_CASE : Dict = image_processor.post_process_semantic_segmentation(outputs=A, target_sizes=[(50, 60)] ) SCREAMING_SNAKE_CASE : Dict = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape, A ) SCREAMING_SNAKE_CASE : Tuple = image_processor.post_process_semantic_segmentation(outputs=A ) SCREAMING_SNAKE_CASE : Any = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape, A )
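A hedged sketch of the inference path these integration tests exercise; the checkpoint name and fixture image come from the tests above, and network access is assumed.

from PIL import Image
from transformers import MobileViTForImageClassification, MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small")
model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
logits = model(**inputs).logits  # shape (1, 1000), as the test asserts
print(logits.argmax(-1).item())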
28
1
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool UpperCamelCase_ = { "Acehnese Arabic": "ace_Arab", "Acehnese Latin": "ace_Latn", "Mesopotamian Arabic": "acm_Arab", "Ta'izzi-Adeni Arabic": "acq_Arab", "Tunisian Arabic": "aeb_Arab", "Afrikaans": "afr_Latn", "South Levantine Arabic": "ajp_Arab", "Akan": "aka_Latn", "Amharic": "amh_Ethi", "North Levantine Arabic": "apc_Arab", "Modern Standard Arabic": "arb_Arab", "Modern Standard Arabic Romanized": "arb_Latn", "Najdi Arabic": "ars_Arab", "Moroccan Arabic": "ary_Arab", "Egyptian Arabic": "arz_Arab", "Assamese": "asm_Beng", "Asturian": "ast_Latn", "Awadhi": "awa_Deva", "Central Aymara": "ayr_Latn", "South Azerbaijani": "azb_Arab", "North Azerbaijani": "azj_Latn", "Bashkir": "bak_Cyrl", "Bambara": "bam_Latn", "Balinese": "ban_Latn", "Belarusian": "bel_Cyrl", "Bemba": "bem_Latn", "Bengali": "ben_Beng", "Bhojpuri": "bho_Deva", "Banjar Arabic": "bjn_Arab", "Banjar Latin": "bjn_Latn", "Standard Tibetan": "bod_Tibt", "Bosnian": "bos_Latn", "Buginese": "bug_Latn", "Bulgarian": "bul_Cyrl", "Catalan": "cat_Latn", "Cebuano": "ceb_Latn", "Czech": "ces_Latn", "Chokwe": "cjk_Latn", "Central Kurdish": "ckb_Arab", "Crimean Tatar": "crh_Latn", "Welsh": "cym_Latn", "Danish": "dan_Latn", "German": "deu_Latn", "Southwestern Dinka": "dik_Latn", "Dyula": "dyu_Latn", "Dzongkha": "dzo_Tibt", "Greek": "ell_Grek", "English": "eng_Latn", "Esperanto": "epo_Latn", "Estonian": "est_Latn", "Basque": "eus_Latn", "Ewe": "ewe_Latn", "Faroese": "fao_Latn", "Fijian": "fij_Latn", "Finnish": "fin_Latn", "Fon": "fon_Latn", "French": "fra_Latn", "Friulian": "fur_Latn", "Nigerian Fulfulde": "fuv_Latn", "Scottish Gaelic": "gla_Latn", "Irish": "gle_Latn", "Galician": "glg_Latn", "Guarani": "grn_Latn", "Gujarati": "guj_Gujr", "Haitian Creole": "hat_Latn", "Hausa": "hau_Latn", "Hebrew": "heb_Hebr", "Hindi": "hin_Deva", "Chhattisgarhi": "hne_Deva", "Croatian": "hrv_Latn", "Hungarian": "hun_Latn", "Armenian": "hye_Armn", "Igbo": "ibo_Latn", "Ilocano": "ilo_Latn", "Indonesian": "ind_Latn", "Icelandic": "isl_Latn", "Italian": "ita_Latn", "Javanese": "jav_Latn", "Japanese": "jpn_Jpan", "Kabyle": "kab_Latn", "Jingpho": "kac_Latn", "Kamba": "kam_Latn", "Kannada": "kan_Knda", "Kashmiri Arabic": "kas_Arab", "Kashmiri Devanagari": "kas_Deva", "Georgian": "kat_Geor", "Central Kanuri Arabic": "knc_Arab", "Central Kanuri Latin": "knc_Latn", "Kazakh": "kaz_Cyrl", "Kabiyè": "kbp_Latn", "Kabuverdianu": "kea_Latn", "Khmer": "khm_Khmr", "Kikuyu": "kik_Latn", "Kinyarwanda": "kin_Latn", "Kyrgyz": "kir_Cyrl", "Kimbundu": "kmb_Latn", "Northern Kurdish": "kmr_Latn", "Kikongo": "kon_Latn", "Korean": "kor_Hang", "Lao": "lao_Laoo", "Ligurian": "lij_Latn", "Limburgish": "lim_Latn", "Lingala": "lin_Latn", "Lithuanian": "lit_Latn", "Lombard": "lmo_Latn", "Latgalian": "ltg_Latn", "Luxembourgish": "ltz_Latn", "Luba-Kasai": "lua_Latn", "Ganda": "lug_Latn", 
"Luo": "luo_Latn", "Mizo": "lus_Latn", "Standard Latvian": "lvs_Latn", "Magahi": "mag_Deva", "Maithili": "mai_Deva", "Malayalam": "mal_Mlym", "Marathi": "mar_Deva", "Minangkabau Arabic ": "min_Arab", "Minangkabau Latin": "min_Latn", "Macedonian": "mkd_Cyrl", "Plateau Malagasy": "plt_Latn", "Maltese": "mlt_Latn", "Meitei Bengali": "mni_Beng", "Halh Mongolian": "khk_Cyrl", "Mossi": "mos_Latn", "Maori": "mri_Latn", "Burmese": "mya_Mymr", "Dutch": "nld_Latn", "Norwegian Nynorsk": "nno_Latn", "Norwegian Bokmål": "nob_Latn", "Nepali": "npi_Deva", "Northern Sotho": "nso_Latn", "Nuer": "nus_Latn", "Nyanja": "nya_Latn", "Occitan": "oci_Latn", "West Central Oromo": "gaz_Latn", "Odia": "ory_Orya", "Pangasinan": "pag_Latn", "Eastern Panjabi": "pan_Guru", "Papiamento": "pap_Latn", "Western Persian": "pes_Arab", "Polish": "pol_Latn", "Portuguese": "por_Latn", "Dari": "prs_Arab", "Southern Pashto": "pbt_Arab", "Ayacucho Quechua": "quy_Latn", "Romanian": "ron_Latn", "Rundi": "run_Latn", "Russian": "rus_Cyrl", "Sango": "sag_Latn", "Sanskrit": "san_Deva", "Santali": "sat_Olck", "Sicilian": "scn_Latn", "Shan": "shn_Mymr", "Sinhala": "sin_Sinh", "Slovak": "slk_Latn", "Slovenian": "slv_Latn", "Samoan": "smo_Latn", "Shona": "sna_Latn", "Sindhi": "snd_Arab", "Somali": "som_Latn", "Southern Sotho": "sot_Latn", "Spanish": "spa_Latn", "Tosk Albanian": "als_Latn", "Sardinian": "srd_Latn", "Serbian": "srp_Cyrl", "Swati": "ssw_Latn", "Sundanese": "sun_Latn", "Swedish": "swe_Latn", "Swahili": "swh_Latn", "Silesian": "szl_Latn", "Tamil": "tam_Taml", "Tatar": "tat_Cyrl", "Telugu": "tel_Telu", "Tajik": "tgk_Cyrl", "Tagalog": "tgl_Latn", "Thai": "tha_Thai", "Tigrinya": "tir_Ethi", "Tamasheq Latin": "taq_Latn", "Tamasheq Tifinagh": "taq_Tfng", "Tok Pisin": "tpi_Latn", "Tswana": "tsn_Latn", "Tsonga": "tso_Latn", "Turkmen": "tuk_Latn", "Tumbuka": "tum_Latn", "Turkish": "tur_Latn", "Twi": "twi_Latn", "Central Atlas Tamazight": "tzm_Tfng", "Uyghur": "uig_Arab", "Ukrainian": "ukr_Cyrl", "Umbundu": "umb_Latn", "Urdu": "urd_Arab", "Northern Uzbek": "uzn_Latn", "Venetian": "vec_Latn", "Vietnamese": "vie_Latn", "Waray": "war_Latn", "Wolof": "wol_Latn", "Xhosa": "xho_Latn", "Eastern Yiddish": "ydd_Hebr", "Yoruba": "yor_Latn", "Yue Chinese": "yue_Hant", "Chinese Simplified": "zho_Hans", "Chinese Traditional": "zho_Hant", "Standard Malay": "zsm_Latn", "Zulu": "zul_Latn", } class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Union[str, Any] = '''facebook/nllb-200-distilled-600M''' A : Optional[Any] = ( '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ''' '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ''' '''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in ''' '''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.''' ) A : Union[str, Any] = '''translator''' A : Any = AutoTokenizer A : List[Any] = AutoModelForSeqaSeqLM A : List[str] = LANGUAGE_CODES A : Tuple = ['''text''', '''text''', '''text'''] A : Union[str, Any] = ['''text'''] def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' if src_lang not in self.lang_to_code: raise ValueError(F"{src_lang} is not a supported language." ) if tgt_lang not in self.lang_to_code: raise ValueError(F"{tgt_lang} is not a supported language." 
) SCREAMING_SNAKE_CASE : Tuple = self.lang_to_code[src_lang] SCREAMING_SNAKE_CASE : List[Any] = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( A, return_tensors='pt', src_lang=A, tgt_lang=A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' return self.model.generate(**A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=A )
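A hedged sketch of driving the same checkpoint directly with `transformers`, which is what the tool above wraps; the language codes come from the table above, and forcing the target-language token as `forced_bos_token_id` is the documented NLLB pattern (exact tokenizer helpers may vary by version).

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")

inputs = tokenizer("Hello, world!", return_tensors="pt")
generated = model.generate(
    **inputs,
    forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"),  # select the target language
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])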
28
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase_ = { "vocab_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt", "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-german-cased": ( "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json" ), "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json" ), }, } UpperCamelCase_ = { "distilbert-base-uncased": 5_1_2, "distilbert-base-uncased-distilled-squad": 5_1_2, "distilbert-base-cased": 5_1_2, "distilbert-base-cased-distilled-squad": 5_1_2, "distilbert-base-german-cased": 5_1_2, "distilbert-base-multilingual-cased": 5_1_2, } UpperCamelCase_ = { "distilbert-base-uncased": {"do_lower_case": True}, "distilbert-base-uncased-distilled-squad": {"do_lower_case": True}, "distilbert-base-cased": {"do_lower_case": False}, "distilbert-base-cased-distilled-squad": {"do_lower_case": False}, "distilbert-base-german-cased": {"do_lower_case": False}, "distilbert-base-multilingual-cased": {"do_lower_case": False}, } class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[Any] = VOCAB_FILES_NAMES A : Dict = PRETRAINED_VOCAB_FILES_MAP A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : Optional[Any] = PRETRAINED_INIT_CONFIGURATION A : Optional[int] = ['''input_ids''', '''attention_mask'''] A : List[Any] = DistilBertTokenizer def __init__( self, A=None, A=None, A=True, A="[UNK]", A="[SEP]", A="[PAD]", A="[CLS]", A="[MASK]", A=True, A=None, **A, ): '''simple docstring''' super().__init__( A, tokenizer_file=A, do_lower_case=A, unk_token=A, sep_token=A, pad_token=A, cls_token=A, mask_token=A, tokenize_chinese_chars=A, strip_accents=A, **A, ) SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase', A ) != do_lower_case or normalizer_state.get('strip_accents', A ) != strip_accents or normalizer_state.get('handle_chinese_chars', A ) != tokenize_chinese_chars ): SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(A, 
normalizer_state.pop('type' ) ) SCREAMING_SNAKE_CASE : Optional[Any] = do_lower_case SCREAMING_SNAKE_CASE : List[str] = strip_accents SCREAMING_SNAKE_CASE : List[str] = tokenize_chinese_chars SCREAMING_SNAKE_CASE : Dict = normalizer_class(**A ) SCREAMING_SNAKE_CASE : Union[str, Any] = do_lower_case def UpperCamelCase_ ( self, A, A=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id] SCREAMING_SNAKE_CASE : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self._tokenizer.model.save(A, name=A ) return tuple(A )
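A brief usage sketch of the fast tokenizer defined above via its public `transformers` name (network access assumed).

from transformers import DistilBertTokenizerFast

tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
encoded = tokenizer("hello world", return_tensors="pt")
print(encoded["input_ids"])  # [CLS] hello world [SEP] as ids, shape (1, 4)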
28
1
'''simple docstring'''
from __future__ import annotations


class _a :
    '''simple docstring'''

    def __init__( self, A = 0 ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[Any] = key

    def UpperCamelCase_ ( self, A, A ):
        '''simple docstring'''
        assert isinstance(A, A ) and isinstance(A, A )
        SCREAMING_SNAKE_CASE : Tuple = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(A ) ^ key ) for ch in content]

    def UpperCamelCase_ ( self, A, A ):
        '''simple docstring'''
        assert isinstance(A, A ) and isinstance(A, A )
        SCREAMING_SNAKE_CASE : Union[str, Any] = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(A ) ^ key ) for ch in content]

    def UpperCamelCase_ ( self, A, A = 0 ):
        '''simple docstring'''
        assert isinstance(A, A ) and isinstance(A, A )
        SCREAMING_SNAKE_CASE : str = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        SCREAMING_SNAKE_CASE : str = ''
        for ch in content:
            ans += chr(ord(A ) ^ key )
        return ans

    def UpperCamelCase_ ( self, A, A = 0 ):
        '''simple docstring'''
        assert isinstance(A, A ) and isinstance(A, A )
        SCREAMING_SNAKE_CASE : Any = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        SCREAMING_SNAKE_CASE : Dict = ''
        for ch in content:
            ans += chr(ord(A ) ^ key )
        return ans

    def UpperCamelCase_ ( self, A, A = 0 ):
        '''simple docstring'''
        assert isinstance(A, A ) and isinstance(A, A )
        try:
            with open(A ) as fin, open('encrypt.out', 'w+' ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(A, A ) )
        except OSError:
            return False
        return True

    def UpperCamelCase_ ( self, A, A ):
        '''simple docstring'''
        assert isinstance(A, A ) and isinstance(A, A )
        try:
            with open(A ) as fin, open('decrypt.out', 'w+' ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(A, A ) )
        except OSError:
            return False
        return True


# Tests
# crypt = XORCipher()
# key = 67

# # test encrypt
# print(crypt.encrypt("hallo welt",key))

# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))

# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))

# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))

# if (crypt.encrypt_file("test.txt",key)):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")

# if (crypt.decrypt_file("encrypt.out",key)):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
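A self-contained sketch of the property the cipher relies on: XOR with the same key is its own inverse, so applying it twice restores the plaintext. The helper below is illustrative and mirrors the string methods above; it is not part of the sample.

def xor_string(content: str, key: int) -> str:
    key %= 255  # keep the key in byte range, as the methods above do
    return "".join(chr(ord(ch) ^ key) for ch in content)


plain = "hallo welt"
cipher_text = xor_string(plain, 67)
assert xor_string(cipher_text, 67) == plain  # round-trip recovers the input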
28
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 UpperCamelCase_ = get_tests_dir("fixtures") class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = mock.Mock() SCREAMING_SNAKE_CASE : List[Any] = 500 SCREAMING_SNAKE_CASE : Optional[Any] = {} SCREAMING_SNAKE_CASE : Any = HTTPError SCREAMING_SNAKE_CASE : Any = {} # Download this model to make sure it's in the cache. SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request', return_value=A ) as mock_head: SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' ) # This check we did call the fake head request mock_head.assert_called() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = ViTImageProcessor.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' ) def UpperCamelCase_ ( self ): '''simple docstring''' with self.assertRaises(A ): # config is in subfolder, the following should not work without specifying the subfolder SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants' ) SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained( 'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor' ) self.assertIsNotNone(A ) @is_staging_test class _a ( unittest.TestCase ): '''simple docstring''' @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = TOKEN HfFolder.save_token(A ) @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' try: delete_repo(token=cls._token, repo_id='test-image-processor' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='test-dynamic-image-processor' ) except HTTPError: pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub('test-image-processor', use_auth_token=self._token ) SCREAMING_SNAKE_CASE : int = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) # Reset repo delete_repo(token=self._token, repo_id='test-image-processor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A, repo_id='test-image-processor', push_to_hub=A, use_auth_token=self._token ) SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 
ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token ) SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('valid_org/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) # Reset repo delete_repo(token=self._token, repo_id='valid_org/test-image-processor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A, repo_id='valid_org/test-image-processor-org', push_to_hub=A, use_auth_token=self._token ) SCREAMING_SNAKE_CASE : Dict = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A, getattr(A, A ) ) def UpperCamelCase_ ( self ): '''simple docstring''' CustomImageProcessor.register_for_auto_class() SCREAMING_SNAKE_CASE : Tuple = CustomImageProcessor.from_pretrained(A ) image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map, {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'}, ) SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained( F"{USER}/test-dynamic-image-processor", trust_remote_code=A ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor' )
28
1
'''simple docstring''' import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors UpperCamelCase_ = logging.getLogger(__name__) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Any = '''sequence-classification''' def __init__( self, A ): '''simple docstring''' if type(A ) == dict: SCREAMING_SNAKE_CASE : Union[str, Any] = Namespace(**A ) SCREAMING_SNAKE_CASE : str = glue_output_modes[hparams.task] SCREAMING_SNAKE_CASE : Tuple = glue_tasks_num_labels[hparams.task] super().__init__(A, A, self.mode ) def UpperCamelCase_ ( self, **A ): '''simple docstring''' return self.model(**A ) def UpperCamelCase_ ( self, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: SCREAMING_SNAKE_CASE : str = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None SCREAMING_SNAKE_CASE : Optional[Any] = self(**A ) SCREAMING_SNAKE_CASE : Tuple = outputs[0] SCREAMING_SNAKE_CASE : str = self.trainer.lr_schedulers[0]['scheduler'] SCREAMING_SNAKE_CASE : Any = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.hparams SCREAMING_SNAKE_CASE : Any = processors[args.task]() SCREAMING_SNAKE_CASE : Union[str, Any] = processor.get_labels() for mode in ["train", "dev"]: SCREAMING_SNAKE_CASE : Tuple = self._feature_file(A ) if os.path.exists(A ) and not args.overwrite_cache: logger.info('Loading features from cached file %s', A ) else: logger.info('Creating features from dataset file at %s', args.data_dir ) SCREAMING_SNAKE_CASE : int = ( processor.get_dev_examples(args.data_dir ) if mode == 'dev' else processor.get_train_examples(args.data_dir ) ) SCREAMING_SNAKE_CASE : Dict = convert_examples_to_features( A, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode, ) logger.info('Saving features into cached file %s', A ) torch.save(A, A ) def UpperCamelCase_ ( self, A, A, A = False ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 'dev' if mode == 'test' else mode SCREAMING_SNAKE_CASE : str = self._feature_file(A ) logger.info('Loading features from cached file %s', A ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(A ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor([f.input_ids for f in features], dtype=torch.long ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([f.attention_mask for f in features], dtype=torch.long ) SCREAMING_SNAKE_CASE : int = torch.tensor([f.token_type_ids for f in features], dtype=torch.long ) if self.hparams.glue_output_mode == "classification": SCREAMING_SNAKE_CASE : int = torch.tensor([f.label for f in features], dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([f.label for f in features], dtype=torch.float ) return DataLoader( TensorDataset(A, A, A, A ), batch_size=A, shuffle=A, ) def 
UpperCamelCase_ ( self, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: SCREAMING_SNAKE_CASE : Optional[Any] = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None SCREAMING_SNAKE_CASE : List[Any] = self(**A ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = outputs[:2] SCREAMING_SNAKE_CASE : Any = logits.detach().cpu().numpy() SCREAMING_SNAKE_CASE : Union[str, Any] = inputs['labels'].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item() SCREAMING_SNAKE_CASE : Optional[int] = np.concatenate([x['pred'] for x in outputs], axis=0 ) if self.hparams.glue_output_mode == "classification": SCREAMING_SNAKE_CASE : List[str] = np.argmax(A, axis=1 ) elif self.hparams.glue_output_mode == "regression": SCREAMING_SNAKE_CASE : List[str] = np.squeeze(A ) SCREAMING_SNAKE_CASE : Any = np.concatenate([x['target'] for x in outputs], axis=0 ) SCREAMING_SNAKE_CASE : Union[str, Any] = [[] for _ in range(out_label_ids.shape[0] )] SCREAMING_SNAKE_CASE : int = [[] for _ in range(out_label_ids.shape[0] )] SCREAMING_SNAKE_CASE : Optional[Any] = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task, A, A )} SCREAMING_SNAKE_CASE : List[Any] = dict(results.items() ) SCREAMING_SNAKE_CASE : Union[str, Any] = results return ret, preds_list, out_label_list def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self._eval_end(A ) SCREAMING_SNAKE_CASE : Dict = ret['log'] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self._eval_end(A ) SCREAMING_SNAKE_CASE : str = ret['log'] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def UpperCamelCase_ ( A, A ): '''simple docstring''' BaseTransformer.add_model_specific_args(A, A ) parser.add_argument( '--max_seq_length', default=128, type=A, help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' 
), ) parser.add_argument( '--task', default='', type=A, required=A, help='The GLUE task to run', ) parser.add_argument( '--gpus', default=0, type=A, help='The number of GPUs allocated for this, it is by default 0 meaning none', ) parser.add_argument( '--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets' ) return parser def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser() add_generic_args(__UpperCamelCase ,os.getcwd() ) SCREAMING_SNAKE_CASE : List[str] = GLUETransformer.add_model_specific_args(__UpperCamelCase ,os.getcwd() ) SCREAMING_SNAKE_CASE : Dict = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join( './results' ,f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" ,) os.makedirs(args.output_dir ) SCREAMING_SNAKE_CASE : str = GLUETransformer(__UpperCamelCase ) SCREAMING_SNAKE_CASE : str = generic_train(__UpperCamelCase ,__UpperCamelCase ) # Optionally, predict on dev set and write to output_dir if args.do_predict: SCREAMING_SNAKE_CASE : List[str] = sorted(glob.glob(os.path.join(args.output_dir ,'checkpoint-epoch=*.ckpt' ) ,recursive=__UpperCamelCase ) ) SCREAMING_SNAKE_CASE : Optional[Any] = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(__UpperCamelCase ) if __name__ == "__main__": main()
28
'''simple docstring'''


class _a :
    '''simple docstring'''

    def __init__( self, A ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Tuple = val
        SCREAMING_SNAKE_CASE : Any = None
        SCREAMING_SNAKE_CASE : Union[str, Any] = None

    def UpperCamelCase_ ( self, A ):
        '''simple docstring'''
        if self.val:
            if val < self.val:
                if self.left is None:
                    SCREAMING_SNAKE_CASE : Optional[int] = Node(A )
                else:
                    self.left.insert(A )
            elif val > self.val:
                if self.right is None:
                    SCREAMING_SNAKE_CASE : int = Node(A )
                else:
                    self.right.insert(A )
        else:
            SCREAMING_SNAKE_CASE : int = val


def lowercase__( __UpperCamelCase: Optional[int] ,__UpperCamelCase: List[str] ):
    """simple docstring"""
    if root:
        inorder(root.left ,__UpperCamelCase )
        res.append(root.val )
        inorder(root.right ,__UpperCamelCase )


def lowercase__( __UpperCamelCase: List[Any] ):
    """simple docstring"""
    if len(__UpperCamelCase ) == 0:
        return arr
    SCREAMING_SNAKE_CASE : Optional[int] = Node(arr[0] )
    for i in range(1 ,len(__UpperCamelCase ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    SCREAMING_SNAKE_CASE : Dict = []
    inorder(__UpperCamelCase ,__UpperCamelCase )
    return res


if __name__ == "__main__":
    print(tree_sort([1_0, 1, 3, 2, 9, 1_4, 1_3]))
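An illustrative check of the in-order property the sort relies on, assuming the helper is exposed as `tree_sort` per the `__main__` block. Two caveats are visible in `insert`: equal values are silently dropped (neither branch fires when `val == self.val`), and a falsy root value such as 0 is treated as an empty slot and overwritten.

assert tree_sort([10, 1, 3, 2, 9, 14, 13]) == [1, 2, 3, 9, 10, 13, 14]
assert tree_sort(["pear", "apple", "mango"]) == ["apple", "mango", "pear"]  # any comparable type
assert tree_sort([3, 1, 3, 2]) == [1, 2, 3]  # the duplicate 3 is dropped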
28
1
'''simple docstring''' from __future__ import annotations import queue class _a : '''simple docstring''' def __init__( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = data SCREAMING_SNAKE_CASE : Optional[Any] = None SCREAMING_SNAKE_CASE : List[str] = None def lowercase__( ): """simple docstring""" print('\n********Press N to stop entering at any point of time********\n' ) SCREAMING_SNAKE_CASE : str = input('Enter the value of the root node: ' ).strip().lower() SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue() SCREAMING_SNAKE_CASE : Dict = TreeNode(int(__UpperCamelCase ) ) q.put(__UpperCamelCase ) while not q.empty(): SCREAMING_SNAKE_CASE : List[Any] = q.get() SCREAMING_SNAKE_CASE : Optional[int] = f"Enter the left node of {node_found.data}: " SCREAMING_SNAKE_CASE : Any = input(__UpperCamelCase ).strip().lower() or 'n' if check == "n": return tree_node SCREAMING_SNAKE_CASE : str = TreeNode(int(__UpperCamelCase ) ) SCREAMING_SNAKE_CASE : Any = left_node q.put(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = f"Enter the right node of {node_found.data}: " SCREAMING_SNAKE_CASE : Dict = input(__UpperCamelCase ).strip().lower() or 'n' if check == "n": return tree_node SCREAMING_SNAKE_CASE : Optional[int] = TreeNode(int(__UpperCamelCase ) ) SCREAMING_SNAKE_CASE : Any = right_node q.put(__UpperCamelCase ) raise def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return print(node.data ,end=',' ) pre_order(node.left ) pre_order(node.right ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return in_order(node.left ) print(node.data ,end=',' ) in_order(node.right ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data ,end=',' ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue() q.put(__UpperCamelCase ) while not q.empty(): SCREAMING_SNAKE_CASE : Optional[int] = q.get() print(node_dequeued.data ,end=',' ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue() q.put(__UpperCamelCase ) while not q.empty(): SCREAMING_SNAKE_CASE : Union[str, Any] = [] while not q.empty(): SCREAMING_SNAKE_CASE : List[Any] = q.get() print(node_dequeued.data ,end=',' ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__UpperCamelCase ) def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : list[TreeNode] = [] SCREAMING_SNAKE_CASE : Optional[Any] = node while n or stack: while n: # start from root node, find its left child print(n.data ,end=',' ) stack.append(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Any = n.left # end of while means current node doesn't have left child SCREAMING_SNAKE_CASE : List[Any] = stack.pop() # start to traverse its right child SCREAMING_SNAKE_CASE : 
Any = n.right def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE : list[TreeNode] = [] SCREAMING_SNAKE_CASE : int = node while n or stack: while n: stack.append(__UpperCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = n.left SCREAMING_SNAKE_CASE : Tuple = stack.pop() print(n.data ,end=',' ) SCREAMING_SNAKE_CASE : str = n.right def lowercase__( __UpperCamelCase: TreeNode ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node: return SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = [], [] SCREAMING_SNAKE_CASE : Optional[int] = node stacka.append(__UpperCamelCase ) while stacka: # to find the reversed order of post order, store it in stack2 SCREAMING_SNAKE_CASE : Optional[int] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__UpperCamelCase ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data ,end=',' ) def lowercase__( __UpperCamelCase: str = "" ,__UpperCamelCase: Dict=50 ,__UpperCamelCase: Optional[int]="*" ): """simple docstring""" if not s: return "\n" + width * char SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = divmod(width - len(__UpperCamelCase ) - 2 ,2 ) return f"{left * char} {s} {(left + extra) * char}" if __name__ == "__main__": import doctest doctest.testmod() print(prompt("Binary Tree Traversals")) UpperCamelCase_ = build_tree() print(prompt("Pre Order Traversal")) pre_order(node) print(prompt() + "\n") print(prompt("In Order Traversal")) in_order(node) print(prompt() + "\n") print(prompt("Post Order Traversal")) post_order(node) print(prompt() + "\n") print(prompt("Level Order Traversal")) level_order(node) print(prompt() + "\n") print(prompt("Actual Level Order Traversal")) level_order_actual(node) print("*" * 5_0 + "\n") print(prompt("Pre Order Traversal - Iteration Version")) pre_order_iter(node) print(prompt() + "\n") print(prompt("In Order Traversal - Iteration Version")) in_order_iter(node) print(prompt() + "\n") print(prompt("Post Order Traversal - Iteration Version")) post_order_iter(node) print(prompt())
"""Deprecation helper (from diffusers): warn about deprecated arguments/attributes
and fail loudly once the library version has passed the removal deadline."""
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    # Any keys left in `take_from` were not declared deprecated: treat them as typos.
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
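# Added usage sketch (hypothetical; the `scale` argument and the version string are
# illustrative, not from the source). Shows how `deprecate` pops a deprecated
# keyword argument and returns its value:
#
#     def resize(image, size, **kwargs):
#         scale = deprecate("scale", "2.0.0", "Pass `size` instead of `scale`.", take_from=kwargs)
#         ...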
"""Benchmark: iterating over a datasets.Dataset row by row, in batches, and in various formats."""
import json
import os
import tempfile

import datasets
from utils import generate_example_dataset, get_duration

SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ = { "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"], "tokenization_roformer": ["RoFormerTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["RoFormerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "RoFormerForCausalLM", "RoFormerForMaskedLM", "RoFormerForMultipleChoice", "RoFormerForQuestionAnswering", "RoFormerForSequenceClassification", "RoFormerForTokenClassification", "RoFormerLayer", "RoFormerModel", "RoFormerPreTrainedModel", "load_tf_weights_in_roformer", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRoFormerForCausalLM", "TFRoFormerForMaskedLM", "TFRoFormerForMultipleChoice", "TFRoFormerForQuestionAnswering", "TFRoFormerForSequenceClassification", "TFRoFormerForTokenClassification", "TFRoFormerLayer", "TFRoFormerModel", "TFRoFormerPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxRoFormerForMaskedLM", "FlaxRoFormerForMultipleChoice", "FlaxRoFormerForQuestionAnswering", "FlaxRoFormerForSequenceClassification", "FlaxRoFormerForTokenClassification", "FlaxRoFormerModel", "FlaxRoFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, 
FlaxRoFormerPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers

# Assumed restoration: the original assigns "3" to TF_CPP_MIN_LOG_LEVEL to silence TensorFlow's C++ logging.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
def get_highest_set_bit_position(number: int) -> int:
    """
    Return the 1-indexed position of the highest set bit of `number`
    (0 for an input of 0).

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
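# Added sanity checks (assuming the function defined above):
assert get_highest_set_bit_position(1) == 1  # 0b1
assert get_highest_set_bit_position(8) == 4  # 0b1000
assert get_highest_set_bit_position(0) == 0  # no set bit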
"""Nagel-Schreckenberg cellular-automaton model of single-lane traffic flow."""
from random import randint, random


def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    """Build the highway: a list of cells where -1 means 'empty' and any other
    value is the speed of the car occupying that cell."""
    highway = [[-1] * number_of_cells]  # create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = randint(0, max_speed) if random_speed else initial_speed  # place the cars
        i += randint(1, max_speed * 2) if random_frequency else frequency  # arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    """Distance (in cells) from the car at `car_index` to the next car ahead,
    wrapping around the end of the highway."""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):
        if cells[cell] != -1:  # if the cell is not empty,
            return distance  # we have the distance we wanted
        distance += 1
    # the car is near the end of the highway: wrap around to the start
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    """One Nagel-Schreckenberg step: accelerate, brake to avoid collisions,
    then randomly slow down with the given probability."""
    number_of_cells = len(highway_now)
    next_highway = [-1] * number_of_cells  # before calculations, the highway is empty
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # we can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    """Run the model for `number_of_update` steps, appending each new highway state."""
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                real_next_speeds[index] = speed  # commit the change of position
        highway.append(real_next_speeds)
    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
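# Added usage sketch (parameter values are illustrative, not from the source):
# a 20-cell circular highway with one car every 4 cells, run for 5 steps with
# a 10% random-braking probability per car per step.
#
#     highway = construct_highway(20, frequency=4, initial_speed=1)
#     for state in simulate(highway, number_of_update=5, probability=0.1, max_speed=5):
#         print(state)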
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    """Pipeline that returns the model's hidden states for a text input."""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor: the logits or the last hidden state
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
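# Added usage sketch (the checkpoint name is illustrative). The pipeline factory
# returns this class for the "feature-extraction" task:
#
#     from transformers import pipeline
#     extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#     features = extractor("This is a test.")
#     # features[0] holds one embedding vector per input token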
def solution(n: int = 1000) -> int:
    """Project Euler problem 1: return the sum of all multiples of 3 or 5 below `n`."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
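# Added cross-check (standard inclusion-exclusion, not from the source): the sum of the
# multiples of k below n has the closed form k * m * (m + 1) / 2 with m = (n - 1) // k.
# The helper name is hypothetical.
def _arithmetic_sum(k: int, n: int) -> int:
    m = (n - 1) // k
    return k * m * (m + 1) // 2


# multiples of 3 plus multiples of 5, minus the double-counted multiples of 15
assert solution(1000) == _arithmetic_sum(3, 1000) + _arithmetic_sum(5, 1000) - _arithmetic_sum(15, 1000)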
from ...processing_utils import ProcessorMixin


class TvltProcessor(ProcessorMixin):
    """Wraps a TVLT image processor and a TVLT audio feature extractor into a single processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
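# Added usage sketch (the checkpoint name and input variables are assumptions,
# not from this file):
#
#     processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#     inputs = processor(images=video_frames, audio=waveform, sampling_rate=44100)
#     # `inputs` merges the image processor's outputs (e.g. pixel values/mask) with
#     # the feature extractor's audio_values/audio_mask into one dictionary.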
'''simple docstring''' import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class _a : '''simple docstring''' def __init__( self, A = "cpu", A = "openai/clip-vit-large-patch14" ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = device SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizerFast.from_pretrained(A ) SCREAMING_SNAKE_CASE : int = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] SCREAMING_SNAKE_CASE : str = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] SCREAMING_SNAKE_CASE : Dict = torchvision.transforms.Normalize(self.image_mean, self.image_std ) SCREAMING_SNAKE_CASE : List[str] = torchvision.transforms.Resize(224 ) SCREAMING_SNAKE_CASE : List[Any] = torchvision.transforms.CenterCrop(224 ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.resize(A ) SCREAMING_SNAKE_CASE : Any = self.center_crop(A ) SCREAMING_SNAKE_CASE : str = self.normalize(A ) return images def __call__( self, A=None, A=None, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.tokenizer(text=A, **A ) SCREAMING_SNAKE_CASE : Tuple = self.preprocess_img(A ) SCREAMING_SNAKE_CASE : List[str] = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class _a ( nn.Module ): '''simple docstring''' def __init__( self, A=10, A=0.01, A=None, A=None, A=None, A=None, A=None, A=None, A=False, A=True, A="image", A=True, A=False, A=False, A=False, ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : List[Any] = device if device else get_device() if vqgan: SCREAMING_SNAKE_CASE : Optional[Any] = vqgan else: SCREAMING_SNAKE_CASE : Tuple = load_vqgan(self.device, conf_path=A, ckpt_path=A ) self.vqgan.eval() if clip: SCREAMING_SNAKE_CASE : List[str] = clip else: SCREAMING_SNAKE_CASE : Any = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' ) self.clip.to(self.device ) SCREAMING_SNAKE_CASE : Optional[int] = ProcessorGradientFlow(device=self.device ) SCREAMING_SNAKE_CASE : Optional[int] = iterations SCREAMING_SNAKE_CASE : Tuple = lr SCREAMING_SNAKE_CASE : Tuple = log SCREAMING_SNAKE_CASE : str = make_grid SCREAMING_SNAKE_CASE : Dict = return_val SCREAMING_SNAKE_CASE : Union[str, Any] = quantize SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decoder.z_shape def UpperCamelCase_ ( self, A=None, A=None, A=5, A=True ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [] if output_path is None: SCREAMING_SNAKE_CASE : int = './animation.gif' if input_path is None: SCREAMING_SNAKE_CASE : Optional[int] = self.save_path SCREAMING_SNAKE_CASE : Optional[Any] = sorted(glob(input_path + '/*' ) ) if not len(A ): raise ValueError( 'No images found in save path, aborting (did you pass save_intermediate=True to the generate' ' function?)' ) if len(A ) == 1: print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' ) SCREAMING_SNAKE_CASE : Optional[Any] = total_duration / len(A ) SCREAMING_SNAKE_CASE : int = [frame_duration] * len(A ) if extend_frames: SCREAMING_SNAKE_CASE : List[str] = 1.5 SCREAMING_SNAKE_CASE : int = 3 for file_name in paths: if file_name.endswith('.png' ): images.append(imageio.imread(A ) ) imageio.mimsave(A, A, duration=A ) print(F"gif 
saved to {output_path}" ) def UpperCamelCase_ ( self, A=None, A=None ): '''simple docstring''' if not (path or img): raise ValueError('Input either path or tensor' ) if img is not None: raise NotImplementedError SCREAMING_SNAKE_CASE : str = preprocess(Image.open(A ), target_image_size=256 ).to(self.device ) SCREAMING_SNAKE_CASE : Any = preprocess_vqgan(A ) SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : Tuple = self.vqgan.encode(A ) return z def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.latent.detach().requires_grad_() SCREAMING_SNAKE_CASE : Union[str, Any] = base_latent + transform_vector if self.quantize: SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.quantize(A ) else: SCREAMING_SNAKE_CASE : Optional[Any] = trans_latent return self.vqgan.decode(A ) def UpperCamelCase_ ( self, A, A, A=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.clip_preprocessor(text=A, images=A, return_tensors='pt', padding=A ) SCREAMING_SNAKE_CASE : str = self.clip(**A ) SCREAMING_SNAKE_CASE : Any = clip_outputs.logits_per_image if weights is not None: SCREAMING_SNAKE_CASE : List[Any] = similarity_logits * weights return similarity_logits.sum() def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_clip_similarity(pos_prompts['prompts'], A, weights=(1 / pos_prompts['weights']) ) if neg_prompts: SCREAMING_SNAKE_CASE : List[Any] = self._get_clip_similarity(neg_prompts['prompts'], A, weights=neg_prompts['weights'] ) else: SCREAMING_SNAKE_CASE : str = torch.tensor([1], device=self.device ) SCREAMING_SNAKE_CASE : List[Any] = -torch.log(A ) + torch.log(A ) return loss def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = torch.randn_like(self.latent, requires_grad=A, device=self.device ) SCREAMING_SNAKE_CASE : Optional[int] = torch.optim.Adam([vector], lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() SCREAMING_SNAKE_CASE : Union[str, Any] = self._add_vector(A ) SCREAMING_SNAKE_CASE : Dict = loop_post_process(A ) SCREAMING_SNAKE_CASE : List[str] = self._get_CLIP_loss(A, A, A ) print('CLIP loss', A ) if self.log: wandb.log({'CLIP Loss': clip_loss} ) clip_loss.backward(retain_graph=A ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' wandb.init(reinit=A, project='face-editor' ) wandb.config.update({'Positive Prompts': positive_prompts} ) wandb.config.update({'Negative Prompts': negative_prompts} ) wandb.config.update({'lr': self.lr, 'iterations': self.iterations} ) if image_path: SCREAMING_SNAKE_CASE : Tuple = Image.open(A ) SCREAMING_SNAKE_CASE : int = image.resize((256, 256) ) wandb.log('Original Image', wandb.Image(A ) ) def UpperCamelCase_ ( self, A ): '''simple docstring''' if not prompts: return [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Dict = [] if isinstance(A, A ): SCREAMING_SNAKE_CASE : Union[str, Any] = [prompt.strip() for prompt in prompts.split('|' )] for prompt in prompts: if isinstance(A, (tuple, list) ): SCREAMING_SNAKE_CASE : List[str] = prompt[0] SCREAMING_SNAKE_CASE : Any = float(prompt[1] ) elif ":" in prompt: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = prompt.split(':' ) SCREAMING_SNAKE_CASE : Any = float(A ) else: SCREAMING_SNAKE_CASE : Dict = prompt SCREAMING_SNAKE_CASE : List[Any] = 1.0 processed_prompts.append(A ) weights.append(A ) return { 
"prompts": processed_prompts, "weights": torch.tensor(A, device=self.device ), } def UpperCamelCase_ ( self, A, A=None, A=None, A=True, A=False, A=True, A=True, A=None, ): '''simple docstring''' if image_path: SCREAMING_SNAKE_CASE : int = self._get_latent(A ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(self.latent_dim, device=self.device ) if self.log: self._init_logging(A, A, A ) assert pos_prompts, "You must provide at least one positive prompt." SCREAMING_SNAKE_CASE : Dict = self.process_prompts(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.process_prompts(A ) if save_final and save_path is None: SCREAMING_SNAKE_CASE : Optional[int] = os.path.join('./outputs/', '_'.join(pos_prompts['prompts'] ) ) if not os.path.exists(A ): os.makedirs(A ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = save_path + '_' + get_timestamp() os.makedirs(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = save_path SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decode(self.latent )[0] if show_intermediate: print('Original Image' ) show_pil(custom_to_pil(A ) ) SCREAMING_SNAKE_CASE : int = loop_post_process(A ) for iter, transformed_img in enumerate(self._optimize_CLIP(A, A, A ) ): if show_intermediate: show_pil(A ) if save_intermediate: transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}.png" ) ) if self.log: wandb.log({'Image': wandb.Image(A )} ) if show_final: show_pil(A ) if save_final: transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}_final.png" ) )
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process UpperCamelCase_ = logging.getLogger(__name__) def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: str ): """simple docstring""" return (preds == labels).mean() @dataclass class _a : '''simple docstring''' A : str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) A : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) A : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) A : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) @dataclass class _a : '''simple docstring''' A : str = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} ) A : str = field(metadata={'''help''': '''Should contain the data files for the task.'''} ) A : int = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) A : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use" ' --overwrite_output_dir to overcome.' 
) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,datefmt='%m/%d/%Y %H:%M:%S' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1 ) ,training_args.fpaa ,) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' ,__UpperCamelCase ) # Set seed set_seed(training_args.seed ) try: SCREAMING_SNAKE_CASE : Optional[Any] = processors[data_args.task_name]() SCREAMING_SNAKE_CASE : List[Any] = processor.get_labels() SCREAMING_SNAKE_CASE : Optional[Any] = len(__UpperCamelCase ) except KeyError: raise ValueError('Task not found: %s' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=__UpperCamelCase ,finetuning_task=data_args.task_name ,cache_dir=model_args.cache_dir ,) SCREAMING_SNAKE_CASE : Dict = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,) SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path ,from_tf=bool('.ckpt' in model_args.model_name_or_path ) ,config=__UpperCamelCase ,cache_dir=model_args.cache_dir ,) # Get datasets SCREAMING_SNAKE_CASE : List[str] = ( MultipleChoiceDataset( data_dir=data_args.data_dir ,tokenizer=__UpperCamelCase ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,) if training_args.do_train else None ) SCREAMING_SNAKE_CASE : str = ( MultipleChoiceDataset( data_dir=data_args.data_dir ,tokenizer=__UpperCamelCase ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,) if training_args.do_eval else None ) def compute_metrics(__UpperCamelCase: EvalPrediction ) -> Dict: SCREAMING_SNAKE_CASE : Any = np.argmax(p.predictions ,axis=1 ) return {"acc": simple_accuracy(__UpperCamelCase ,p.label_ids )} # Data collator SCREAMING_SNAKE_CASE : str = DataCollatorWithPadding(__UpperCamelCase ,pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer SCREAMING_SNAKE_CASE : Union[str, Any] = Trainer( model=__UpperCamelCase ,args=__UpperCamelCase ,train_dataset=__UpperCamelCase ,eval_dataset=__UpperCamelCase ,compute_metrics=__UpperCamelCase ,data_collator=__UpperCamelCase ,) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation SCREAMING_SNAKE_CASE : 
List[str] = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) SCREAMING_SNAKE_CASE : Any = trainer.evaluate() SCREAMING_SNAKE_CASE : Any = os.path.join(training_args.output_dir ,'eval_results.txt' ) if trainer.is_world_master(): with open(__UpperCamelCase ,'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' ,__UpperCamelCase ,__UpperCamelCase ) writer.write('%s = %s\n' % (key, value) ) results.update(__UpperCamelCase ) return results def lowercase__( __UpperCamelCase: Tuple ): """simple docstring""" main() if __name__ == "__main__": main()
'''simple docstring''' import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self, A ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Dict = nn.ModuleList(A ) def UpperCamelCase_ ( self, A, A, A, A, A, A = None, A = None, A = None, A = None, A = False, A = True, ): '''simple docstring''' for i, (image, scale, controlnet) in enumerate(zip(A, A, self.nets ) ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = controlnet( A, A, A, A, A, A, A, A, A, A, A, ) # merge samples if i == 0: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = down_samples, mid_sample else: SCREAMING_SNAKE_CASE : str = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(A, A ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def UpperCamelCase_ ( self, A, A = True, A = None, A = False, A = None, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 0 SCREAMING_SNAKE_CASE : Optional[int] = save_directory for controlnet in self.nets: controlnet.save_pretrained( A, is_main_process=A, save_function=A, safe_serialization=A, variant=A, ) idx += 1 SCREAMING_SNAKE_CASE : List[Any] = model_path_to_save + F"_{idx}" @classmethod def UpperCamelCase_ ( cls, A, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 0 SCREAMING_SNAKE_CASE : List[Any] = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... SCREAMING_SNAKE_CASE : Optional[Any] = pretrained_model_path while os.path.isdir(A ): SCREAMING_SNAKE_CASE : Optional[int] = ControlNetModel.from_pretrained(A, **A ) controlnets.append(A ) idx += 1 SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + F"_{idx}" logger.info(F"{len(A )} controlnets loaded from {pretrained_model_path}." ) if len(A ) == 0: raise ValueError( F"No ControlNets found under {os.path.dirname(A )}. Expected at least {pretrained_model_path + '_0'}." ) return cls(A )
__all__ = [
    "DownloadConfig",
    "DownloadManager",
    "DownloadMode",
    "StreamingDownloadManager",
]

from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
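# Added usage note: the re-exports above make the public entry points importable
# directly from this package, e.g.
#
#     from datasets.download import DownloadConfig, DownloadManager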
'''simple docstring''' from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging UpperCamelCase_ = logging.get_logger(__name__) class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : str = ['''audio_values''', '''audio_mask'''] def __init__( self, A=2_048, A=1, A=[16, 16], A=128, A=44_100, A=86, A=2_048, A=0.0, **A, ): '''simple docstring''' super().__init__( feature_size=A, sampling_rate=A, padding_value=A, **A, ) SCREAMING_SNAKE_CASE : str = spectrogram_length SCREAMING_SNAKE_CASE : Optional[Any] = num_channels SCREAMING_SNAKE_CASE : List[str] = patch_size SCREAMING_SNAKE_CASE : Optional[int] = feature_size // self.patch_size[1] SCREAMING_SNAKE_CASE : Dict = n_fft SCREAMING_SNAKE_CASE : Tuple = sampling_rate // hop_length_to_sampling_rate SCREAMING_SNAKE_CASE : str = sampling_rate SCREAMING_SNAKE_CASE : int = padding_value SCREAMING_SNAKE_CASE : Any = mel_filter_bank( num_frequency_bins=1 + n_fft // 2, num_mel_filters=A, min_frequency=0.0, max_frequency=2_20_50.0, sampling_rate=A, norm='slaney', mel_scale='slaney', ).T def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = spectrogram( A, window_function(self.n_fft, 'hann' ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0, ) SCREAMING_SNAKE_CASE : Union[str, Any] = log_spec[:, :-1] SCREAMING_SNAKE_CASE : List[Any] = log_spec - 20.0 SCREAMING_SNAKE_CASE : Optional[Any] = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0 return log_spec def __call__( self, A, A = None, A = True, A = None, A = False, A = False, **A, ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( 'This feature extractor is set to support sampling rate' F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled" F" with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) SCREAMING_SNAKE_CASE : List[Any] = isinstance(A, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"Only mono-channel audio is supported for input to {self}" ) SCREAMING_SNAKE_CASE : int = is_batched_numpy or ( isinstance(A, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(A, np.ndarray ): SCREAMING_SNAKE_CASE : Any = np.asarray(A, dtype=np.floataa ) elif isinstance(A, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): SCREAMING_SNAKE_CASE : Optional[Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis SCREAMING_SNAKE_CASE : int = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0], A ): SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(A, dtype=np.floataa ) for feature in audio_features] # Create audio attention mask SCREAMING_SNAKE_CASE : Tuple = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: SCREAMING_SNAKE_CASE : List[Any] = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] SCREAMING_SNAKE_CASE : Tuple = np.array(A ).astype(np.floataa ) # convert into correct format for padding SCREAMING_SNAKE_CASE : Tuple = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch SCREAMING_SNAKE_CASE : Optional[Any] = np.ones([len(A ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) SCREAMING_SNAKE_CASE : Optional[int] = padded_audio_features * self.padding_value for i in range(len(A ) ): SCREAMING_SNAKE_CASE : Optional[int] = audio_features[i] SCREAMING_SNAKE_CASE : Union[str, Any] = feature # return as BatchFeature if return_attention_mask: SCREAMING_SNAKE_CASE : Any = {'audio_values': padded_audio_features, 'audio_mask': audio_mask} else: SCREAMING_SNAKE_CASE : Dict = {'audio_values': padded_audio_features} SCREAMING_SNAKE_CASE : str = BatchFeature(data=A, tensor_type=A ) return encoded_inputs
'''simple docstring''' import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self, A, A, A=1_024, A=1_024, A=3.6 ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = tokenizer SCREAMING_SNAKE_CASE : str = tokenizer.bos_token_id SCREAMING_SNAKE_CASE : Tuple = dataset SCREAMING_SNAKE_CASE : List[Any] = seq_length SCREAMING_SNAKE_CASE : Tuple = seq_length * chars_per_token * num_of_sequences def __iter__( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = iter(self.dataset ) SCREAMING_SNAKE_CASE : List[Any] = True while more_examples: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(A )['content'] ) buffer_len += len(buffer[-1] ) except StopIteration: SCREAMING_SNAKE_CASE : Dict = False break SCREAMING_SNAKE_CASE : Dict = tokenizer(A, truncation=A )['input_ids'] SCREAMING_SNAKE_CASE : List[str] = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0, len(A ), self.seq_length ): SCREAMING_SNAKE_CASE : Union[str, Any] = all_token_ids[i : i + self.seq_length] if len(A ) == self.seq_length: yield torch.tensor(A ) def lowercase__( __UpperCamelCase: Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = {'streaming': True} SCREAMING_SNAKE_CASE : Dict = load_dataset(args.dataset_name ,split='train' ,**__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = ConstantLengthDataset(__UpperCamelCase ,__UpperCamelCase ,seq_length=args.seq_length ) SCREAMING_SNAKE_CASE : Optional[Any] = DataLoader(__UpperCamelCase ,batch_size=args.batch_size ) return eval_dataloader def lowercase__( __UpperCamelCase: Any ): """simple docstring""" model.eval() SCREAMING_SNAKE_CASE : Any = [] for step, batch in enumerate(__UpperCamelCase ): with torch.no_grad(): SCREAMING_SNAKE_CASE : List[Any] = model(__UpperCamelCase ,labels=__UpperCamelCase ) SCREAMING_SNAKE_CASE : List[str] = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(__UpperCamelCase ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break SCREAMING_SNAKE_CASE : str = torch.mean(torch.cat(__UpperCamelCase ) ) try: SCREAMING_SNAKE_CASE : Any = torch.exp(__UpperCamelCase ) except OverflowError: SCREAMING_SNAKE_CASE : List[str] = float('inf' ) return loss.item(), perplexity.item() # Setup Accelerator UpperCamelCase_ = Accelerator() # Parse configuration UpperCamelCase_ = HfArgumentParser(EvaluationArguments) UpperCamelCase_ = parser.parse_args() set_seed(args.seed) # Logging UpperCamelCase_ = logging.getLogger(__name__) logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) # Load model and tokenizer UpperCamelCase_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt) UpperCamelCase_ = AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader UpperCamelCase_ = create_dataloader(args) # Prepare everything with our `accelerator`. 
UpperCamelCase_ , UpperCamelCase_ = accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info("Evaluating and saving model after training") UpperCamelCase_ , UpperCamelCase_ = evaluate(args) logger.info(F"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
"""Regression test for Prim's minimum-spanning-tree algorithm on a small undirected graph."""
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        # the graph is undirected, so either orientation of an edge counts
        assert edge in result or reverse in result
'''simple docstring''' import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _a : '''simple docstring''' def __init__( self, A, A=13, A=32, A=3, A=4, A=[10, 20, 30, 40], A=[2, 2, 3, 2], A=True, A=True, A=37, A="gelu", A=10, A=0.02, A=["stage2", "stage3", "stage4"], A=[2, 3, 4], A=None, ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = parent SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size SCREAMING_SNAKE_CASE : Optional[int] = image_size SCREAMING_SNAKE_CASE : List[Any] = num_channels SCREAMING_SNAKE_CASE : str = num_stages SCREAMING_SNAKE_CASE : Optional[Any] = hidden_sizes SCREAMING_SNAKE_CASE : List[str] = depths SCREAMING_SNAKE_CASE : Tuple = is_training SCREAMING_SNAKE_CASE : Any = use_labels SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size SCREAMING_SNAKE_CASE : int = hidden_act SCREAMING_SNAKE_CASE : Optional[int] = num_labels SCREAMING_SNAKE_CASE : Optional[int] = initializer_range SCREAMING_SNAKE_CASE : int = out_features SCREAMING_SNAKE_CASE : Any = out_indices SCREAMING_SNAKE_CASE : Dict = scope def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE : Optional[int] = None if self.use_labels: SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size], self.num_labels ) SCREAMING_SNAKE_CASE : Optional[int] = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self ): '''simple docstring''' return ConvNextVaConfig( num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=A, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, ) def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = ConvNextVaModel(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : str = model(A ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = ConvNextVaForImageClassification(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : List[Any] = model(A, labels=A ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = ConvNextVaBackbone(config=A ) model.to(A ) model.eval() 
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(
            list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4]
        )

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(
            list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1]
        )

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict


@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # Mixin bases, test method names, and the flag names below were obfuscated in the
    # source; they are assumed from the upstream transformers ConvNextV2 test suite.
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            # mapping constants assumed: upstream skips base and backbone model classes here
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING),
                *get_values(MODEL_FOR_BACKBONE_MAPPING),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING), *get_values(MODEL_FOR_BACKBONE_MAPPING)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
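# A minimal usage sketch of the out_features contract that create_and_check_backbone
# verifies above: the backbone returns one feature map per requested stage, and
# backbone.channels mirrors the matching entries of config.hidden_sizes. Assumes the
# ConvNextVaConfig / ConvNextVaBackbone classes used in the tests are importable as
# above; the stage name and input size are illustrative only.
import torch

config = ConvNextVaConfig(out_features=["stage2"])
backbone = ConvNextVaBackbone(config)
backbone.eval()

with torch.no_grad():
    outputs = backbone(torch.randn(1, 3, 224, 224))

print([tuple(fm.shape) for fm in outputs.feature_maps])  # one map, for "stage2"
print(backbone.channels)  # channel count of the requested stage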
28
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DPMSolverMultistepInverseScheduler,
    DPMSolverMultistepScheduler,
    StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # attribute and test method names were obfuscated in the source; they are assumed
    # from the PipelineTesterMixin API and the upstream diffusers test file
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)


@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    # test method names assumed from the upstream diffusers test file
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents

        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents

        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
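# For orientation, a condensed sketch of the three-step DiffEdit flow that the
# integration tests above exercise: 1) generate_mask finds the region that differs
# between the two prompts, 2) invert encodes the source image into editable latents,
# 3) the pipeline denoises with the mask and latents. This mirrors
# test_stable_diffusion_diffedit_full; the prompts and image URL are the same fixtures.
import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
).convert("RGB").resize((768, 768))
source, target = "a bowl of fruit", "a bowl of pears"

mask = pipe.generate_mask(image=image, source_prompt=source, target_prompt=target)
latents = pipe.invert(prompt=source, image=image, inpaint_strength=0.7).latents
edited = pipe(
    prompt=target,
    mask_image=mask,
    image_latents=latents,
    negative_prompt=source,
    inpaint_strength=0.7,
).images[0]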
28
1
import sys
from collections import defaultdict


class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        # sift the value at `start` down until the min-heap property holds
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp_pos = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp_pos

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        # bubble `val` up from `index` toward the root
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
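# A small worked example of prisms_algorithm above (a sketch; assumes the definitions
# in this file are in scope). Vertices 0-3 with weighted undirected edges; the MST
# keeps the three cheapest edges that connect every vertex.
from collections import defaultdict

example = defaultdict(list)
for u, v, w in [(0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 2, 4)]:
    example[u].append([v, w])
    example[v].append([u, w])

print(prisms_algorithm(example))  # expected: [(0, 1), (1, 2), (2, 3)]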
28
def solution(limit: int = 1_000_000) -> int:
    # phi[i] holds Euler's totient of i once the sieve below has run:
    # start from i - 1 and remove the contribution of each prime factor.
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so update all of its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
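# Quick sanity check for solution() above (a sketch; the brute-force helper is
# illustrative, not part of the original file). Counting fractions d/n in lowest
# terms with 0 < d < n <= limit equals the totient sum that solution() computes.
from math import gcd


def totient_sum_brute_force(limit: int) -> int:
    return sum(1 for n in range(2, limit + 1) for d in range(1, n) if gcd(d, n) == 1)


assert solution(8) == totient_sum_brute_force(8) == 21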
28
1