# SqueezeBert model tests (Hugging Face Transformers test suite).
import unittest

from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        SqueezeBertForMaskedLM,
        SqueezeBertForMultipleChoice,
        SqueezeBertForQuestionAnswering,
        SqueezeBertForSequenceClassification,
        SqueezeBertForTokenClassification,
        SqueezeBertModel,
    )


class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
# Accelerate multi-device metrics test script (gather_for_metrics).
import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed


# Silence advisory warnings from transformers; the original assignment target
# was lost to identifier obfuscation, so this reconstruction is an assumption.
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return a regression model, its DDP-prepared copy, and a dataloader."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dataset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dataset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
# Lazy import structure for the Transformers EnCodec model package.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
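# Illustrative note (an addition, not part of the original file): with
# `_LazyModule`, the torch-backed symbols registered above are only imported
# on first attribute access, so `import transformers` stays cheap. A
# hypothetical interactive session:
#
#     >>> from transformers import EncodecConfig   # config only, no modeling import
#     >>> from transformers import EncodecModel    # first access pulls in modeling_encodec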
# RoBERTa model configuration (Hugging Face Transformers).
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
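# Usage sketch (an illustrative addition, not part of the original file),
# assuming the surrounding `transformers` package is installed:
#
#     >>> from transformers import RobertaConfig
#     >>> from transformers.models.roberta.configuration_roberta import RobertaOnnxConfig
#     >>> config = RobertaConfig(vocab_size=1000, hidden_size=64)
#     >>> onnx_config = RobertaOnnxConfig(config)
#     >>> onnx_config.inputs
#     OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#                  ('attention_mask', {0: 'batch', 1: 'sequence'})])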
# Lazy import structure for the Transformers UniSpeech model package.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Conversion script: original EfficientFormer checkpoints -> Transformers format.
import argparse
import re
from pathlib import Path

import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor

from transformers import (
    EfficientFormerConfig,
    EfficientFormerForImageClassificationWithTeacher,
    EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name


def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path",
        default=None,
        type=str,
        required=True,
        help="Path to EfficientFormer pytorch checkpoint.",
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for EfficientFormer model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    parser.set_defaults(push_to_hub=True)

    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
# String case-conversion helpers (snake/camel/pascal/kebab).
# NOTE: the public function names below are reconstructions; the source had
# obfuscated identifiers.
import re


def split_input(str_: str) -> list:
    """Split on punctuation, then on whitespace within each fragment."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    """Capitalize every word and join them without separators (PascalCase)."""
    string_split = split_input(str_)
    return "".join(["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    """Join words with `separator`, upper- or lower-casing each word."""
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [separator.join([char.upper() for char in sub_str]) for sub_str in string_split]
            )
        else:
            res_str = "".join(
                [separator.join([char.lower() for char in sub_str]) for sub_str in string_split]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("doctest").testmod()
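# Usage examples (added for illustration; as noted above, the function names
# are reconstructions of obfuscated identifiers, so treat them as assumptions):
#
#     >>> to_pascal_case("one two 31235three4four")
#     'OneTwo31235three4four'
#     >>> to_camel_case("one two 31235three4four")
#     'oneTwo31235three4four'
#     >>> to_snake_case("one two 31235three4four", upper=False)
#     'one_two_31235three4four'
#     >>> to_kebab_case("one two 31235three4four", upper=False)
#     'one-two-31235three4four'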
# Strassen divide-and-conquer matrix multiplication.
from __future__ import annotations

import math


def default_matrix_multiplication(a: list, b: list) -> list:
    """Plain multiplication, only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split a square matrix of even size into four equal quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursive Strassen multiplication with the seven standard products."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
# 2D static equilibrium check via sum of moments.
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar form into rectangular components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check if a system of forces is in static equilibrium."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )

    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )

    location = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])

    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
# CLIPProcessor tests (tokenizer + image processor).
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor


@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
# Isolate the decimal part of a number.
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Return the decimal part of `number`, rounded to `digit_amount` places
    when `digit_amount` > 0."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
# Distribution output heads for time-series models (Transformers).
from typing import Callable, Dict, Optional, Tuple

import torch
from torch import nn
from torch.distributions import (
    AffineTransform,
    Distribution,
    Independent,
    NegativeBinomial,
    Normal,
    StudentT,
    TransformedDistribution,
)


class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the affinely transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the affinely transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the affinely transformed distribution."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Smooth map onto the positive orthant."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
# RoCBert tokenizer tests (Hugging Face Transformers test suite).
import json
import os
import unittest

from transformers.models.roc_bert.tokenization_roc_bert import (
    VOCAB_FILES_NAMES,
    RoCBertBasicTokenizer,
    RoCBertTokenizer,
    RoCBertWordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])

    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
'''simple docstring'''

# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)

        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
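# Hedged usage sketch (added; not part of the original file): invoking the tool
# defined above. "invoice.png" is an assumed local file, the default checkpoint
# is downloaded on first use, and the keyword-call signature comes from PipelineTool.
from PIL import Image

tool = DocumentQuestionAnsweringTool()
answer = tool(document=Image.open("invoice.png"), question="What is the purchase amount?")
print(answer)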
'''simple docstring'''

import argparse
import re

from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints

from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging


logging.set_verbosity_info()


# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"

        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")

            s_dict.pop(key)

    return s_dict


GIN_TO_CONFIG_MAPPING = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}


def convert_gin_to_config(gin_file, num_experts):
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config


def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
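# Hedged illustration (added; not part of the original script) of the key renaming
# rename_keys applies to a single T5X parameter path:
import re

key = "encoder/layers_3/attention/key/kernel"
new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", key)
# "/attention/" -> "/0/SelfAttention/" and "key" -> "k", per MOE_LAYER_NAME_MAPPING
new_key = new_key.replace("/attention/", "/0/SelfAttention/").replace("key", "k")
print(new_key)  # encoder/block/3/layer/0/SelfAttention/k/kernel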
'''simple docstring'''

def count_divisors(n: int) -> int:
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
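# Hedged worked example (added; not part of the original file): count_divisors
# relies on the prime factorisation n = p1**a1 * p2**a2 * ..., whose divisor
# count is (a1 + 1) * (a2 + 1) * ...  For 28 = 2**2 * 7 that is (2 + 1) * (1 + 1) = 6.
assert count_divisors(28) == 6  # divisors: 1, 2, 4, 7, 14, 28
assert count_divisors(76576500) == 576  # first triangular number with > 500 divisors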
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
    "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
    "uclanlp/visualbert-vqa-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
    "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
    "uclanlp/visualbert-vcr-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
    ),
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}


class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
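# Hedged usage sketch (added; not part of the original file): instantiating the
# config with defaults and overriding the width of the visual embeddings.
config = VisualBertConfig(visual_embedding_dim=1024)
print(config.hidden_size, config.visual_embedding_dim)  # 768 1024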
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. __lowerCAmelCase = [[1, 2, 4], [1, 2, 3, 4]] __lowerCAmelCase = DisjunctiveConstraint(__lowercase ) self.assertTrue(isinstance(dc.token_ids , __lowercase ) ) with self.assertRaises(__lowercase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__lowercase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def _snake_case (self ): # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). __lowerCAmelCase = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__lowercase ): DisjunctiveConstraint(__lowercase ) # fails here def _snake_case (self ): __lowerCAmelCase = [[1, 2, 3], [1, 2, 4]] __lowerCAmelCase = DisjunctiveConstraint(__lowercase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 ) __lowerCAmelCase = stepped is True and completed is False and reset is False self.assertTrue(__lowercase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 ) __lowerCAmelCase = stepped is True and completed is False and reset is False self.assertTrue(__lowercase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(3 ) __lowerCAmelCase = stepped is True and completed is True and reset is False self.assertTrue(__lowercase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def _snake_case (self ): __lowerCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __lowerCAmelCase = DisjunctiveConstraint(__lowercase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
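# Hedged illustration (added; not part of the original tests): the update()/reset()
# protocol the tests above exercise, on the constraint set [[1, 2, 3], [1, 2, 4]].
from transformers.generation import DisjunctiveConstraint

dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
for token in (1, 2, 4):
    stepped, completed, reset = dc.update(token)
print(dc.completed, dc.current_seq)  # True [1, 2, 4]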
'''simple docstring'''

from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}

    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
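# Hedged self-contained example (added; not part of the original file): sorting a
# word's letters gives the shared signature that groups anagrams together.
import collections

demo_words = ["pots", "stop", "tops", "cat"]
by_sig = collections.defaultdict(list)
for w in demo_words:
    by_sig["".join(sorted(w))].append(w)
print(by_sig["opst"])  # ['pots', 'stop', 'tops']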
'''simple docstring''' from typing import Dict, Optional import numpy as np import datasets _UpperCAmelCase : List[str] = """ IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation, the mean IoU of the image is calculated by taking the IoU of each class and averaging them. """ _UpperCAmelCase : str = """ Args: predictions (`List[ndarray]`): List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. references (`List[ndarray]`): List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. num_labels (`int`): Number of classes (categories). ignore_index (`int`): Index that will be ignored during evaluation. nan_to_num (`int`, *optional*): If specified, NaN values will be replaced by the number defined by the user. label_map (`dict`, *optional*): If specified, dictionary mapping old label indices to new label indices. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: `Dict[str, float | ndarray]` comprising various elements: - *mean_iou* (`float`): Mean Intersection-over-Union (IoU averaged over all categories). - *mean_accuracy* (`float`): Mean accuracy (averaged over all categories). - *overall_accuracy* (`float`): Overall accuracy on all images. - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`): Per category accuracy. - *per_category_iou* (`ndarray` of shape `(num_labels,)`): Per category IoU. Examples: >>> import numpy as np >>> mean_iou = datasets.load_metric(\"mean_iou\") >>> # suppose one has 3 different segmentation maps predicted >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]]) >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]]) >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]]) >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]]) >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]]) >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]]) >>> predicted = [predicted_1, predicted_2, predicted_3] >>> ground_truth = [actual_1, actual_2, actual_3] >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False) >>> print(results) # doctest: +NORMALIZE_WHITESPACE {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])} """ _UpperCAmelCase : Tuple = """\ @software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020, author = {{MMSegmentation Contributors}}, license = {Apache-2.0}, month = {7}, title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}}, url = {https://github.com/open-mmlab/mmsegmentation}, year = {2020} }""" def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False, ): if label_map is not None: for old_id, new_id in label_map.items(): __lowerCAmelCase = new_id # turn into Numpy arrays __lowerCAmelCase = np.array(lowerCamelCase) __lowerCAmelCase = np.array(lowerCamelCase) if reduce_labels: __lowerCAmelCase = 2_5_5 __lowerCAmelCase = label - 1 __lowerCAmelCase = 2_5_5 __lowerCAmelCase = label != ignore_index __lowerCAmelCase = np.not_equal(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = pred_label[mask] __lowerCAmelCase = np.array(lowerCamelCase)[mask] __lowerCAmelCase = pred_label[pred_label == label] __lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0] __lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0] __lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0] __lowerCAmelCase = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False, ): __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) for result, gt_seg_map in zip(lowerCamelCase, lowerCamelCase): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = intersect_and_union( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = False, ): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = total_intersect_and_union( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) # compute metrics __lowerCAmelCase = {} __lowerCAmelCase = total_area_intersect.sum() / total_area_label.sum() __lowerCAmelCase = total_area_intersect / total_area_union __lowerCAmelCase = total_area_intersect / total_area_label __lowerCAmelCase = np.nanmean(lowerCamelCase) __lowerCAmelCase = np.nanmean(lowerCamelCase) __lowerCAmelCase = all_acc __lowerCAmelCase = iou __lowerCAmelCase = acc if nan_to_num is not None: __lowerCAmelCase = {metric: np.nan_to_num(lowerCamelCase, nan=lowerCamelCase) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): """simple docstring""" def _snake_case (self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - 
width dim { '''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ), '''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ), } ) , reference_urls=[ '''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py''' ] , ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = False , ): __lowerCAmelCase = mean_iou( results=__lowercase , gt_seg_maps=__lowercase , num_labels=__lowercase , ignore_index=__lowercase , nan_to_num=__lowercase , label_map=__lowercase , reduce_labels=__lowercase , ) return iou_result
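# Hedged worked example (added; not part of the original metric): per-class IoU is
# |pred ∩ label| / |pred ∪ label|, which intersect_and_union builds from histograms.
import numpy as np

pred = np.array([[1, 1], [0, 1]])
label = np.array([[1, 0], [0, 1]])
intersect = np.logical_and(pred == 1, label == 1).sum()  # 2 pixels agree on class 1
union = np.logical_or(pred == 1, label == 1).sum()  # 3 pixels carry class 1 in either map
print(intersect / union)  # 0.666...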
'''simple docstring'''

import argparse


CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
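# Hedged mini-demo (added; not part of the original script): the same line edit
# performed on an in-memory copy of the custom.js fragment the updater targets.
lines = [
    'const stableVersion = "v4.29.0"\n',
    "const versionMapping = {\n",
    '    "main": "main",\n',
    "}\n",
]
index = 0
while not lines[index].startswith("const stableVersion ="):
    index += 1
lines[index] = 'const stableVersion = "v4.30.0"\n'
print("".join(lines))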
'''simple docstring''' import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : str = DebertaTokenizer __UpperCamelCase : str = True __UpperCamelCase : Any = DebertaTokenizerFast def _snake_case (self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __lowerCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) __lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __lowerCAmelCase = {'''unk_token''': '''[UNK]'''} __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowercase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowercase ) ) def _snake_case (self , **__lowercase ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , __lowercase ): __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = '''lower newer''' return input_text, output_text def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __lowerCAmelCase = tokenizer.tokenize(__lowercase ) self.assertListEqual(__lowercase , __lowercase ) __lowerCAmelCase = tokens + [tokenizer.unk_token] __lowerCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = tokenizer('''Hello''' , '''World''' ) __lowerCAmelCase = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , __lowercase ) @slow def _snake_case (self ): __lowerCAmelCase = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) __lowerCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowercase ) __lowerCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowercase ) __lowerCAmelCase = tokenizer.encode( '''sequence builders''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase ) __lowerCAmelCase = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def _snake_case (self ): __lowerCAmelCase = [self.tokenizer_class] if self.test_rust_tokenizer: 
tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: __lowerCAmelCase = tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) __lowerCAmelCase = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] __lowerCAmelCase = tokenizer(__lowercase , padding=__lowercase ) __lowerCAmelCase = [tokenizer.decode(__lowercase , skip_special_tokens=__lowercase ) for seq in encoding['''input_ids''']] # fmt: off __lowerCAmelCase = { '''input_ids''': [ [1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on __lowerCAmelCase = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , __lowercase ) for expected, decoded in zip(__lowercase , __lowercase ): self.assertEqual(__lowercase , __lowercase )
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Optional[Any] = RobertaTokenizer __UpperCamelCase : Dict = RobertaTokenizerFast __UpperCamelCase : List[str] = True __UpperCamelCase : List[str] = {'cls_token': '<s>'} def _snake_case (self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __lowerCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] __lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) __lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __lowerCAmelCase = {'''unk_token''': '''<unk>'''} __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowerCAmelCase ) ) def _snake_case (self , **__lowercase ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def _snake_case (self , **__lowercase ): kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def _snake_case (self , __lowercase ): __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = '''lower newer''' return input_text, output_text def _snake_case (self ): __lowerCAmelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __lowerCAmelCase = tokenizer.tokenize(__lowerCAmelCase ) # , add_prefix_space=True) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __lowerCAmelCase = tokens + [tokenizer.unk_token] __lowerCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase ) def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__lowerCAmelCase ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode('''Hello world! 
cécé herlolip 418''' , add_special_tokens=__lowerCAmelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def _snake_case (self ): __lowerCAmelCase = self.tokenizer_class.from_pretrained('''roberta-base''' ) __lowerCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCAmelCase ) __lowerCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCAmelCase ) __lowerCAmelCase = tokenizer.encode( '''sequence builders''' , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase ) __lowerCAmelCase = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = '''Encode this sequence.''' __lowerCAmelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]] # Testing encoder arguments __lowerCAmelCase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase ) __lowerCAmelCase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} ) __lowerCAmelCase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase ) # Testing spaces after special tokens __lowerCAmelCase = '''<mask>''' tokenizer.add_special_tokens( {'''mask_token''': AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase )} ) # mask token has a left space __lowerCAmelCase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) __lowerCAmelCase = '''Encode <mask> sequence''' __lowerCAmelCase = '''Encode <mask>sequence''' __lowerCAmelCase = tokenizer.encode(__lowerCAmelCase ) __lowerCAmelCase = encoded.index(__lowerCAmelCase ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) __lowerCAmelCase = tokenizer.encode(__lowerCAmelCase ) __lowerCAmelCase = encoded.index(__lowerCAmelCase ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase ) def _snake_case (self ): pass def _snake_case (self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __lowerCAmelCase = '''A, <mask> AllenNLP sentence.''' __lowerCAmelCase = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase ) __lowerCAmelCase = 
tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) __lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) __lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( __lowerCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( __lowerCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) def _snake_case (self ): for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase ) __lowerCAmelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) __lowerCAmelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __lowerCAmelCase ) self.assertEqual(post_processor_state['''add_prefix_space'''] , __lowerCAmelCase ) self.assertEqual(post_processor_state['''trim_offsets'''] , __lowerCAmelCase ) def _snake_case (self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __lowerCAmelCase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name` __lowerCAmelCase = F"""{text_of_1_token} {text_of_1_token}""" __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase ) __lowerCAmelCase = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCAmelCase ) + 1, len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , ) __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase ) __lowerCAmelCase = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCAmelCase ) + 1, len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , ) __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase ) __lowerCAmelCase = 
tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCAmelCase ), len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , ) __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase ) __lowerCAmelCase = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCAmelCase ), len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , ) __lowerCAmelCase = F""" {text}""" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase ) __lowerCAmelCase = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ) + 1, 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , ) __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase ) __lowerCAmelCase = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ), 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , ) __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase ) __lowerCAmelCase = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ), 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
'''simple docstring'''

import argparse
import datetime


def zeller(date_input: str) -> str:
    # Days of the week for response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
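# Hedged usage example (added; not part of the original file): 01-01-2024
# (January 1st, 2024) was a Monday, and the function validates its own result
# against datetime before returning.
print(zeller("01-01-2024"))  # Your date 01-01-2024, is a Monday!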
'''simple docstring''' _UpperCAmelCase : Tuple = { 'Pillow': 'Pillow', 'accelerate': 'accelerate>=0.11.0', 'compel': 'compel==0.1.8', 'black': 'black~=23.1', 'datasets': 'datasets', 'filelock': 'filelock', 'flax': 'flax>=0.4.1', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.13.2', 'requests-mock': 'requests-mock==1.10.0', 'importlib_metadata': 'importlib_metadata', 'invisible-watermark': 'invisible-watermark', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2', 'jaxlib': 'jaxlib>=0.1.65', 'Jinja2': 'Jinja2', 'k-diffusion': 'k-diffusion>=0.0.12', 'torchsde': 'torchsde', 'note_seq': 'note_seq', 'librosa': 'librosa', 'numpy': 'numpy', 'omegaconf': 'omegaconf', 'parameterized': 'parameterized', 'protobuf': 'protobuf>=3.20.3,<4', 'pytest': 'pytest', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'ruff': 'ruff>=0.0.241', 'safetensors': 'safetensors', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'scipy': 'scipy', 'onnx': 'onnx', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'tensorboard': 'tensorboard', 'torch': 'torch>=1.4', 'torchvision': 'torchvision', 'transformers': 'transformers>=4.25.1', 'urllib3': 'urllib3<=2.0.0', }
import gc
import unittest

import numpy as np
import torch
from torch.backends.cuda import sdp_kernel

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin

enable_full_determinism()


class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
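# A minimal usage sketch of the two sampling modes exercised by the tests
# above, assuming the public "diffusers/consistency_models" checkpoint that
# the slow tests reference is available. Not part of the original test file.
import torch

from diffusers import CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNet2DModel

unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")

# Multistep sampling: pass explicit timesteps, exactly as the tests do.
image = pipe(num_inference_steps=None, timesteps=[22, 0], class_labels=0).images[0]

# One-step sampling: a single inference step and no explicit timesteps.
image = pipe(num_inference_steps=1, timesteps=None, class_labels=0).images[0]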
def power(base: int, exponent: int) -> float:
    """Return base raised to exponent, computed recursively."""
    return base * power(base, (exponent - 1)) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
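# An alternative sketch, not in the original script: exponentiation by
# squaring computes the same result in O(log n) multiplications instead of
# the O(n) recursion above.
def fast_power(base: int, exponent: int) -> int:
    """
    >>> fast_power(2, 10)
    1024
    >>> fast_power(3, 0)
    1
    """
    if exponent < 0:
        raise ValueError("fast_power() only handles non-negative exponents")
    result = 1
    while exponent:
        if exponent & 1:  # fold in the current square when this bit is set
            result *= base
        base *= base
        exponent >>= 1
    return result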
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Return the Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by majority vote among its k nearest neighbours."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
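# A quick cross-check of the hand-rolled classifier above against
# scikit-learn's KNeighborsClassifier; a sketch continuing the script above,
# not part of the original.
from sklearn.neighbors import KNeighborsClassifier

knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
prediction = classes[knn.predict([[4.4, 3.1, 1.3, 1.4]])[0]]
print(prediction)  # should agree with classifier(...) above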
import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class a__ ( __A ): """simple docstring""" def _snake_case (self ): __lowerCAmelCase = tempfile.mkdtemp() __lowerCAmelCase = 5 # Realm tok __lowerCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''test''', '''question''', '''this''', '''is''', '''the''', '''first''', '''second''', '''third''', '''fourth''', '''fifth''', '''record''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __lowerCAmelCase = os.path.join(self.tmpdirname , '''realm_tokenizer''' ) os.makedirs(_a , exist_ok=_a ) __lowerCAmelCase = os.path.join(_a , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __lowerCAmelCase = os.path.join(self.tmpdirname , '''realm_block_records''' ) os.makedirs(_a , exist_ok=_a ) def _snake_case (self ): return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) ) def _snake_case (self ): shutil.rmtree(self.tmpdirname ) def _snake_case (self ): __lowerCAmelCase = RealmConfig(num_block_records=self.num_block_records ) return config def _snake_case (self ): __lowerCAmelCase = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''question''': ['''foo''', '''bar'''], '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']], } ) return dataset def _snake_case (self ): __lowerCAmelCase = np.array( [ b'''This is the first record''', b'''This is the second record''', b'''This is the third record''', b'''This is the fourth record''', b'''This is the fifth record''', b'''This is a longer longer longer record''', ] , dtype=_a , ) return block_records def _snake_case (self ): __lowerCAmelCase = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def _snake_case (self ): __lowerCAmelCase = self.get_config() __lowerCAmelCase = self.get_dummy_retriever() __lowerCAmelCase = retriever.tokenizer __lowerCAmelCase = np.array([0, 3] , dtype='''long''' ) __lowerCAmelCase = tokenizer(['''Test question'''] ).input_ids __lowerCAmelCase = tokenizer( ['''the fourth'''] , add_special_tokens=_a , return_token_type_ids=_a , return_attention_mask=_a , ).input_ids __lowerCAmelCase = config.reader_seq_len __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = retriever( _a , _a , answer_ids=_a , max_length=_a , return_tensors='''np''' ) self.assertEqual(len(_a ) , 2 ) self.assertEqual(len(_a ) , 2 ) self.assertEqual(len(_a ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , 
['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , ) def _snake_case (self ): __lowerCAmelCase = self.get_config() __lowerCAmelCase = self.get_dummy_retriever() __lowerCAmelCase = retriever.tokenizer __lowerCAmelCase = np.array([0, 3, 5] , dtype='''long''' ) __lowerCAmelCase = tokenizer(['''Test question'''] ).input_ids __lowerCAmelCase = tokenizer( ['''the fourth''', '''longer longer'''] , add_special_tokens=_a , return_token_type_ids=_a , return_attention_mask=_a , ).input_ids __lowerCAmelCase = config.reader_seq_len __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = retriever( _a , _a , answer_ids=_a , max_length=_a , return_tensors='''np''' ) self.assertEqual([False, True, True] , _a ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _a ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _a ) def _snake_case (self ): __lowerCAmelCase = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) ) # Test local path __lowerCAmelCase = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) ) self.assertEqual(retriever.block_records[0] , b'''This is the first record''' ) # Test mocked remote path with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download: __lowerCAmelCase = os.path.join( os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME ) __lowerCAmelCase = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' ) self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): __lowerCAmelCase = tempfile.mkdtemp() # fmt: off __lowerCAmelCase = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) __lowerCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowerCAmelCase = {'''unk_token''': '''<unk>'''} __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowercase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowercase ) ) __lowerCAmelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } __lowerCAmelCase = os.path.join(self.tmpdirname , __lowercase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowercase , __lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **__lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **__lowercase ) def _snake_case (self , **__lowercase ): return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self ): shutil.rmtree(self.tmpdirname ) def _snake_case (self ): __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCAmelCase = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase ) __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) 
self.assertIsInstance(processor_slow.tokenizer , __lowercase ) self.assertIsInstance(processor_fast.tokenizer , __lowercase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowercase ) self.assertIsInstance(processor_fast.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCAmelCase = self.get_image_processor(do_normalize=__lowercase ) __lowerCAmelCase = OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowercase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = image_processor(__lowercase , return_tensors='''np''' ) __lowerCAmelCase = processor(images=__lowercase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = processor(text=__lowercase , return_tensors='''np''' ) __lowerCAmelCase = tokenizer(__lowercase , return_tensors='''np''' ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(text=__lowercase , images=__lowercase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = '''google/owlvit-base-patch32''' __lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase ) __lowerCAmelCase = ['''cat''', '''nasa badge'''] __lowerCAmelCase = processor(text=__lowercase ) __lowerCAmelCase = 16 self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = '''google/owlvit-base-patch32''' __lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase ) __lowerCAmelCase = [['''cat''', '''nasa badge'''], 
['''person''']] __lowerCAmelCase = processor(text=__lowercase ) __lowerCAmelCase = 16 __lowerCAmelCase = len(__lowercase ) __lowerCAmelCase = max([len(__lowercase ) for texts in input_texts] ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = '''google/owlvit-base-patch32''' __lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase ) __lowerCAmelCase = ['''cat''', '''nasa badge'''] __lowerCAmelCase = processor(text=__lowercase ) __lowerCAmelCase = 16 __lowerCAmelCase = inputs['''input_ids'''] __lowerCAmelCase = [ [4_94_06, 23_68, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_94_06, 68_41, 1_13_01, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) ) self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] ) self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(images=__lowercase , query_images=__lowercase ) self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCAmelCase = processor.batch_decode(__lowercase ) __lowerCAmelCase = tokenizer.batch_decode(__lowercase ) self.assertListEqual(__lowercase , __lowercase )
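# A hedged usage sketch pairing the processor under test with
# OwlViTForObjectDetection for zero-shot detection. The calls follow the
# public transformers API; the image path is hypothetical, and this snippet
# is not part of the original test file.
import torch
from PIL import Image

from transformers import OwlViTForObjectDetection, OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")

image = Image.open("cats.png")  # hypothetical local image
inputs = processor(text=[["cat", "nasa badge"]], images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Convert per-query logits/boxes into (score, label, box) detections.
target_sizes = torch.tensor([image.size[::-1]])
results = processor.post_process_object_detection(outputs, threshold=0.1, target_sizes=target_sizes)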
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: int, parent: Node | None) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # each search aims at the frontier of the other
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidirectional_a_star = BidirectionalAStar(init, goal)
    path = bidirectional_a_star.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
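# A sketch (not in the original) of replacing the sort-and-pop open list with
# a binary heap: heapq pops the minimum in O(log n) instead of re-sorting the
# whole list every iteration. `get_successors` is an assumed callable with
# the same contract as AStar.get_successors above.
import heapq


def a_star_heap(start_node, target_pos, get_successors):
    open_heap = [start_node]
    closed = set()
    while open_heap:
        current = heapq.heappop(open_heap)  # cheapest f_cost, via Node.__lt__
        if current.pos == target_pos:
            return current
        if current.pos in closed:
            continue
        closed.add(current.pos)
        for child in get_successors(current):
            if child.pos not in closed:
                heapq.heappush(open_heap, child)
    return None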
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: check every 3-permutation of the array."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Two-pointer scan over the sorted array, O(n^2)."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
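# A quick worked example (not in the original) showing the two
# implementations above agree on a fixed input. Copies are passed because
# triplet_sum2 sorts its argument in place.
example = [1, 4, 45, 6, 10, 8]
target = 22  # the only matching triplet is 4 + 8 + 10
assert triplet_sum1(example[:], target) == (4, 8, 10)
assert triplet_sum2(example[:], target) == (4, 8, 10)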
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError("The number of coefficients should be equal to the degree + 1.")

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += self.coefficients[i] * polynomial_2.coefficients[j]
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: float) -> float:
        result: float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
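# A short usage sketch (not in the original) exercising the operators above.
# Coefficients are stored lowest power first: [c0, c1, c2] means c2*x^2 + c1*x + c0.
p = Polynomial(2, [1, 0, 3])  # 3x^2 + 1
q = Polynomial(1, [0, 2])     # 2x
print(p + q)                  # 3x^2 + 2x + 1
print(p * q)                  # 6x^3 + 2x
print(p.derivative())         # 6x
print(p.evaluate(2))          # 3*4 + 1 = 13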
import numpy as np


def power_iteration(input_matrix, vector, error_tol=1e-12, max_iterations=100):
    """Estimate the dominant eigenvalue and eigenvector of a (Hermitian) matrix."""
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
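# A tiny usage sketch (not in the original) continuing the module above:
# estimate the dominant eigenpair of a small symmetric matrix and compare
# against numpy's exact answer.
import numpy as np

a = np.array([[2.0, 1.0], [1.0, 2.0]])
v0 = np.array([1.0, 0.0])  # any start vector not orthogonal to the dominant eigenvector
eigenvalue, eigenvector = power_iteration(a, v0)
print(eigenvalue)                # ~3.0, the largest eigenvalue of a
print(np.linalg.eigh(a)[0][-1])  # 3.0, for comparison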
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class a__ : """simple docstring""" def __init__(self , __lowercase , __lowercase=2 , __lowercase=True , __lowercase=False , __lowercase=10 , __lowercase=3 , __lowercase=32 * 8 , __lowercase=32 * 8 , __lowercase=4 , __lowercase=64 , ): __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = is_training __lowerCAmelCase = use_auxiliary_loss __lowerCAmelCase = num_queries __lowerCAmelCase = num_channels __lowerCAmelCase = min_size __lowerCAmelCase = max_size __lowerCAmelCase = num_labels __lowerCAmelCase = hidden_dim __lowerCAmelCase = hidden_dim def _snake_case (self ): __lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( lowerCAmelCase__ ) __lowerCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCAmelCase__ ) __lowerCAmelCase = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCAmelCase__ ) > 0.5 ).float() __lowerCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=lowerCAmelCase__ ) > 0.5).long() __lowerCAmelCase = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def _snake_case (self ): __lowerCAmelCase = MaskaFormerConfig( hidden_size=self.hidden_dim , ) __lowerCAmelCase = self.num_queries __lowerCAmelCase = self.num_labels __lowerCAmelCase = [1, 1, 1, 1] __lowerCAmelCase = self.num_channels __lowerCAmelCase = 64 __lowerCAmelCase = 1_28 __lowerCAmelCase = self.hidden_dim __lowerCAmelCase = self.hidden_dim __lowerCAmelCase = self.hidden_dim return config def _snake_case (self ): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.prepare_config_and_inputs() __lowerCAmelCase = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def _snake_case (self , __lowercase , __lowercase ): __lowerCAmelCase = output.encoder_hidden_states __lowerCAmelCase = output.pixel_decoder_hidden_states __lowerCAmelCase = output.transformer_decoder_hidden_states self.parent.assertTrue(len(lowerCAmelCase__ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowerCAmelCase__ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowerCAmelCase__ ) , config.decoder_layers ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase=False ): with torch.no_grad(): __lowerCAmelCase = MaskaFormerModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() __lowerCAmelCase = model(pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ ) __lowerCAmelCase = model(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, 
self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(lowerCAmelCase__ , lowerCAmelCase__ ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ): __lowerCAmelCase = MaskaFormerForUniversalSegmentation(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() def comm_check_on_output(__lowercase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): __lowerCAmelCase = model(pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ ) __lowerCAmelCase = model(lowerCAmelCase__ ) comm_check_on_output(lowerCAmelCase__ ) __lowerCAmelCase = model( pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ ) comm_check_on_output(lowerCAmelCase__ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class a__ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __UpperCamelCase : str = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () __UpperCamelCase : Optional[Any] = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {} __UpperCamelCase : int = False __UpperCamelCase : Optional[int] = False __UpperCamelCase : Dict = False __UpperCamelCase : str = False def _snake_case (self ): __lowerCAmelCase = MaskaFormerModelTester(self ) __lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ ) def _snake_case (self ): self.config_tester.run_common_tests() def _snake_case (self ): __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(lowerCAmelCase__ , **lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ ) def _snake_case (self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCAmelCase__ ) @unittest.skip(reason='''Mask2Former does not use inputs_embeds''' ) def _snake_case (self ): pass @unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' ) def _snake_case (self ): pass @unittest.skip(reason='''Mask2Former is not a generative model''' ) def _snake_case (self ): pass @unittest.skip(reason='''Mask2Former does not use token embeddings''' ) def _snake_case (self ): pass @require_torch_multi_gpu @unittest.skip( reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def _snake_case (self ): pass @unittest.skip('''Will be fixed soon by 
reducing the size of the model used for common tests.''' ) def _snake_case (self ): pass def _snake_case (self ): __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(lowerCAmelCase__ ) __lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase = [*signature.parameters.keys()] __lowerCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCAmelCase__ ) @slow def _snake_case (self ): for model_name in ["facebook/mask2former-swin-small-coco-instance"]: __lowerCAmelCase = MaskaFormerModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) def _snake_case (self ): __lowerCAmelCase = (self.model_tester.min_size,) * 2 __lowerCAmelCase = { '''pixel_values''': torch.randn((2, 3, *size) , device=lowerCAmelCase__ ), '''mask_labels''': torch.randn((2, 10, *size) , device=lowerCAmelCase__ ), '''class_labels''': torch.zeros(2 , 10 , device=lowerCAmelCase__ ).long(), } __lowerCAmelCase = self.model_tester.get_config() __lowerCAmelCase = MaskaFormerForUniversalSegmentation(lowerCAmelCase__ ).to(lowerCAmelCase__ ) __lowerCAmelCase = model(**lowerCAmelCase__ ) self.assertTrue(outputs.loss is not None ) def _snake_case (self ): __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(lowerCAmelCase__ , **lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ ) def _snake_case (self ): __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(lowerCAmelCase__ ).to(lowerCAmelCase__ ) __lowerCAmelCase = model(**lowerCAmelCase__ , output_attentions=lowerCAmelCase__ ) self.assertTrue(outputs.attentions is not None ) def _snake_case (self ): if not self.model_tester.is_training: return __lowerCAmelCase = self.all_model_classes[1] __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() __lowerCAmelCase = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.train() __lowerCAmelCase = model(lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ ).loss loss.backward() def _snake_case (self ): __lowerCAmelCase = self.all_model_classes[1] __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() __lowerCAmelCase = True __lowerCAmelCase = True __lowerCAmelCase = model_class(lowerCAmelCase__ ).to(lowerCAmelCase__ ) model.train() __lowerCAmelCase = model(lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ ) __lowerCAmelCase = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() __lowerCAmelCase = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() __lowerCAmelCase = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() __lowerCAmelCase = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=lowerCAmelCase__ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) _UpperCAmelCase : Tuple = 
1E-4 def __magic_name__( ): __lowerCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') return image @require_vision @slow class a__ ( unittest.TestCase ): """simple docstring""" @cached_property def _snake_case (self ): return "facebook/mask2former-swin-small-coco-instance" @cached_property def _snake_case (self ): return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def _snake_case (self ): __lowerCAmelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase__ ) __lowerCAmelCase = self.default_image_processor __lowerCAmelCase = prepare_img() __lowerCAmelCase = image_processor(lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ ) __lowerCAmelCase = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCAmelCase__ , (1, 3, 3_84, 3_84) ) with torch.no_grad(): __lowerCAmelCase = model(**lowerCAmelCase__ ) __lowerCAmelCase = torch.tensor( [[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(lowerCAmelCase__ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) ) __lowerCAmelCase = torch.tensor( [[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(lowerCAmelCase__ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) ) __lowerCAmelCase = torch.tensor( [[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(lowerCAmelCase__ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) ) def _snake_case (self ): __lowerCAmelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase__ ).eval() __lowerCAmelCase = self.default_image_processor __lowerCAmelCase = prepare_img() __lowerCAmelCase = image_processor(lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ ) __lowerCAmelCase = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCAmelCase__ , (1, 3, 3_84, 3_84) ) with torch.no_grad(): __lowerCAmelCase = model(**lowerCAmelCase__ ) # masks_queries_logits __lowerCAmelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) __lowerCAmelCase = [ [-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1], [-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1], [-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5], ] __lowerCAmelCase = torch.tensor(lowerCAmelCase__ ).to(lowerCAmelCase__ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) ) # class_queries_logits __lowerCAmelCase = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) __lowerCAmelCase = torch.tensor( [ [1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2], [0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3], [0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5], ] ).to(lowerCAmelCase__ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , 
lowerCAmelCase__ , atol=lowerCAmelCase__ ) ) def _snake_case (self ): __lowerCAmelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase__ ).eval() __lowerCAmelCase = self.default_image_processor __lowerCAmelCase = image_processor( [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , ) __lowerCAmelCase = inputs['''pixel_values'''].to(lowerCAmelCase__ ) __lowerCAmelCase = [el.to(lowerCAmelCase__ ) for el in inputs['''mask_labels''']] __lowerCAmelCase = [el.to(lowerCAmelCase__ ) for el in inputs['''class_labels''']] with torch.no_grad(): __lowerCAmelCase = model(**lowerCAmelCase__ ) self.assertTrue(outputs.loss is not None )
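# A hedged inference sketch for the checkpoint the integration tests above
# use. The post-processing call follows the public transformers API; this
# snippet is not part of the original test file.
import torch
from PIL import Image

from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerImageProcessor

checkpoint = "facebook/mask2former-swin-small-coco-instance"
image_processor = Mask2FormerImageProcessor.from_pretrained(checkpoint)
model = Mask2FormerForUniversalSegmentation.from_pretrained(checkpoint).eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Resolve the per-query masks and class logits into an instance segmentation map.
result = image_processor.post_process_instance_segmentation(outputs, target_sizes=[image.size[::-1]])[0]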
'''simple docstring''' from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract _UpperCAmelCase : str = logging.get_logger(__name__) def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): return [ int(1_0_0_0 * (box[0] / width)), int(1_0_0_0 * (box[1] / height)), int(1_0_0_0 * (box[2] / width)), int(1_0_0_0 * (box[3] / height)), ] def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase = None): __lowerCAmelCase = tesseract_config if tesseract_config is not None else '''''' # apply OCR __lowerCAmelCase = to_pil_image(lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = pil_image.size __lowerCAmelCase = pytesseract.image_to_data(lowerCamelCase, lang=lowerCamelCase, output_type='''dict''', config=lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height'''] # filter empty words and corresponding coordinates __lowerCAmelCase = [idx for idx, word in enumerate(lowerCamelCase) if not word.strip()] __lowerCAmelCase = [word for idx, word in enumerate(lowerCamelCase) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format __lowerCAmelCase = [] for x, y, w, h in zip(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = [x, y, x + w, y + h] actual_boxes.append(lowerCamelCase) # finally, normalize the bounding boxes __lowerCAmelCase = [] for box in actual_boxes: normalized_boxes.append(normalize_box(lowerCamelCase, lowerCamelCase, lowerCamelCase)) assert len(lowerCamelCase) == len(lowerCamelCase), "Not as many words as there are bounding boxes" return words, normalized_boxes class a__ ( __A ): """simple docstring""" __UpperCamelCase : str = ['pixel_values'] def __init__(self , __lowercase = True , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = None , __lowercase = "" , **__lowercase , ): super().__init__(**__lowercase ) __lowerCAmelCase = size if size is not None else {'''height''': 2_24, '''width''': 2_24} __lowerCAmelCase = get_size_dict(__lowercase ) __lowerCAmelCase = do_resize __lowerCAmelCase = size __lowerCAmelCase = resample __lowerCAmelCase = apply_ocr __lowerCAmelCase = ocr_lang __lowerCAmelCase = tesseract_config def _snake_case (self , __lowercase , __lowercase , __lowercase = PILImageResampling.BILINEAR , __lowercase = None , **__lowercase , ): __lowerCAmelCase = get_size_dict(__lowercase ) if "height" not in size or "width" not in size: raise ValueError(F"""The size 
dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" ) __lowerCAmelCase = (size['''height'''], size['''width''']) return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase ) def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ): __lowerCAmelCase = do_resize if do_resize is not None else self.do_resize __lowerCAmelCase = size if size is not None else self.size __lowerCAmelCase = get_size_dict(__lowercase ) __lowerCAmelCase = resample if resample is not None else self.resample __lowerCAmelCase = apply_ocr if apply_ocr is not None else self.apply_ocr __lowerCAmelCase = ocr_lang if ocr_lang is not None else self.ocr_lang __lowerCAmelCase = tesseract_config if tesseract_config is not None else self.tesseract_config __lowerCAmelCase = make_list_of_images(__lowercase ) if not valid_images(__lowercase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) # All transformations expect numpy arrays. __lowerCAmelCase = [to_numpy_array(__lowercase ) for image in images] if apply_ocr: requires_backends(self , '''pytesseract''' ) __lowerCAmelCase = [] __lowerCAmelCase = [] for image in images: __lowerCAmelCase , __lowerCAmelCase = apply_tesseract(__lowercase , __lowercase , __lowercase ) words_batch.append(__lowercase ) boxes_batch.append(__lowercase ) if do_resize: __lowerCAmelCase = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images] # flip color channels from RGB to BGR (as Detectron2 requires this) __lowerCAmelCase = [flip_channel_order(__lowercase ) for image in images] __lowerCAmelCase = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images] __lowerCAmelCase = BatchFeature(data={'''pixel_values''': images} , tensor_type=__lowercase ) if apply_ocr: __lowerCAmelCase = words_batch __lowerCAmelCase = boxes_batch return data
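# A hedged usage sketch, assuming the class above is the LayoutLMv2-style
# image processor from transformers (BGR channel order for Detectron2,
# optional Tesseract OCR). The image path is hypothetical and pytesseract
# must be installed for apply_ocr=True; not part of the original module.
from PIL import Image

from transformers import LayoutLMv2ImageProcessor

processor = LayoutLMv2ImageProcessor(apply_ocr=True)
image = Image.open("invoice.png").convert("RGB")
encoding = processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)      # (1, 3, 224, 224)
print(encoding.words, encoding.boxes)   # OCR'd words and normalized boxes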
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device

torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_text_to_image(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from ..utils import DummyObject, requires_backends


class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
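# A small sketch (not in the source) of what these placeholders buy the user:
# importing the class always succeeds, but constructing or loading it without
# torch/scipy installed raises an ImportError naming the missing backends
# instead of failing later with an opaque AttributeError.
try:
    scheduler = LMSDiscreteScheduler()
except ImportError as err:
    print(err)  # points the user at installing the "torch" and "scipy" backends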
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>).
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentencepiece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using the sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
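# A minimal usage sketch for the tokenizer above. It assumes the standard
# `transformers` export of this class (`CamembertTokenizer`) and the public
# "camembert-base" checkpoint; it is illustrative, not part of the file.
from transformers import CamembertTokenizer

tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
encoded = tokenizer("J'aime le camembert !")
print(encoded["input_ids"])                    # ids wrapped in <s> ... </s>
print(tokenizer.decode(encoded["input_ids"]))  # round-trips to the input text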
'''simple docstring''' import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class a__ ( unittest.TestCase ): """simple docstring""" def __init__(self , __lowercase , __lowercase = True , __lowercase = None , __lowercase = 32 , __lowercase = True , __lowercase = 1 / 2_55 , __lowercase = True , __lowercase = True , __lowercase = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , __lowercase = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , __lowercase = True , __lowercase=7 , __lowercase=30 , __lowercase=4_00 , __lowercase=3 , ): __lowerCAmelCase = parent __lowerCAmelCase = do_resize __lowerCAmelCase = size if size is not None else {'''shortest_edge''': 2_88} __lowerCAmelCase = size_divisor __lowerCAmelCase = do_rescale __lowerCAmelCase = rescale_factor __lowerCAmelCase = do_normalize __lowerCAmelCase = do_center_crop __lowerCAmelCase = image_mean __lowerCAmelCase = image_std __lowerCAmelCase = do_pad __lowerCAmelCase = batch_size __lowerCAmelCase = num_channels __lowerCAmelCase = min_resolution __lowerCAmelCase = max_resolution def _snake_case (self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def _snake_case (self , __lowercase , __lowercase=False ): if not batched: __lowerCAmelCase = self.size['''shortest_edge'''] __lowerCAmelCase = image_inputs[0] if isinstance(__lowercase , Image.Image ): __lowerCAmelCase , __lowerCAmelCase = image.size else: __lowerCAmelCase , __lowerCAmelCase = image.shape[1], image.shape[2] __lowerCAmelCase = size / min(__lowercase , __lowercase ) if h < w: __lowerCAmelCase , __lowerCAmelCase = size, scale * w else: __lowerCAmelCase , __lowerCAmelCase = scale * h, size __lowerCAmelCase = int((13_33 / 8_00) * size ) if max(__lowercase , __lowercase ) > max_size: __lowerCAmelCase = max_size / max(__lowercase , __lowercase ) __lowerCAmelCase = newh * scale __lowerCAmelCase = neww * scale __lowerCAmelCase , __lowerCAmelCase = int(newh + 0.5 ), int(neww + 0.5 ) __lowerCAmelCase , __lowerCAmelCase = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: __lowerCAmelCase = [] for image in image_inputs: __lowerCAmelCase , __lowerCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __lowerCAmelCase = max(__lowercase , key=lambda __lowercase : item[0] )[0] __lowerCAmelCase = max(__lowercase , key=lambda __lowercase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Any = BridgeTowerImageProcessor if is_vision_available() else None def _snake_case (self ): __lowerCAmelCase = BridgeTowerImageProcessingTester(self ) @property def _snake_case (self ): return self.image_processor_tester.prepare_image_processor_dict() def _snake_case (self ): __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowercase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowercase 
, '''image_std''' ) ) self.assertTrue(hasattr(__lowercase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowercase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowercase , '''size''' ) ) self.assertTrue(hasattr(__lowercase , '''size_divisor''' ) ) def _snake_case (self ): pass def _snake_case (self ): # Initialize image processor __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , Image.Image ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case (self ): # Initialize image processor __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , np.ndarray ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case (self ): # Initialize image processor __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , torch.Tensor ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
from .integrations import (
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
    run_hp_search_sigopt,
    run_hp_search_wandb,
)
from .trainer_utils import (
    HPSearchBackend,
    default_hp_space_optuna,
    default_hp_space_ray,
    default_hp_space_sigopt,
    default_hp_space_wandb,
)
from .utils import logging


logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend
    for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend():
    available_backends = [
        backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()
    ]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
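# A minimal sketch of how these backends are consumed. `trainer` is a
# placeholder for a configured `transformers.Trainer`; the other names come
# from the module above. Under those assumptions, this mirrors what
# `Trainer.hyperparameter_search` does internally.
backend_name = default_hp_search_backend()  # first installed backend, e.g. "optuna"
backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(backend_name)]()
backend.ensure_available()
best_run = backend.run(trainer, n_trials=10, direction="minimize")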
'''simple docstring''' # Imports import numpy as np class a__ : """simple docstring""" def __init__(self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ): self.set_matricies(red=__lowercase , green=__lowercase , blue=__lowercase , red_edge=__lowercase , nir=__lowercase ) def _snake_case (self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ): if red is not None: __lowerCAmelCase = red if green is not None: __lowerCAmelCase = green if blue is not None: __lowerCAmelCase = blue if red_edge is not None: __lowerCAmelCase = red_edge if nir is not None: __lowerCAmelCase = nir return True def _snake_case (self , __lowercase="" , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ): self.set_matricies(red=__lowercase , green=__lowercase , blue=__lowercase , red_edge=__lowercase , nir=__lowercase ) __lowerCAmelCase = { '''ARVI2''': self.arvaa, '''CCCI''': self.ccci, '''CVI''': self.cvi, '''GLI''': self.gli, '''NDVI''': self.ndvi, '''BNDVI''': self.bndvi, '''redEdgeNDVI''': self.red_edge_ndvi, '''GNDVI''': self.gndvi, '''GBNDVI''': self.gbndvi, '''GRNDVI''': self.grndvi, '''RBNDVI''': self.rbndvi, '''PNDVI''': self.pndvi, '''ATSAVI''': self.atsavi, '''BWDRVI''': self.bwdrvi, '''CIgreen''': self.ci_green, '''CIrededge''': self.ci_rededge, '''CI''': self.ci, '''CTVI''': self.ctvi, '''GDVI''': self.gdvi, '''EVI''': self.evi, '''GEMI''': self.gemi, '''GOSAVI''': self.gosavi, '''GSAVI''': self.gsavi, '''Hue''': self.hue, '''IVI''': self.ivi, '''IPVI''': self.ipvi, '''I''': self.i, '''RVI''': self.rvi, '''MRVI''': self.mrvi, '''MSAVI''': self.m_savi, '''NormG''': self.norm_g, '''NormNIR''': self.norm_nir, '''NormR''': self.norm_r, '''NGRDI''': self.ngrdi, '''RI''': self.ri, '''S''': self.s, '''IF''': self._if, '''DVI''': self.dvi, '''TVI''': self.tvi, '''NDRE''': self.ndre, } try: return funcs[index]() except KeyError: print('''Index not in the list!''' ) return False def _snake_case (self ): return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red))) def _snake_case (self ): return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def _snake_case (self ): return self.nir * (self.red / (self.green**2)) def _snake_case (self ): return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def _snake_case (self ): return (self.nir - self.red) / (self.nir + self.red) def _snake_case (self ): return (self.nir - self.blue) / (self.nir + self.blue) def _snake_case (self ): return (self.redEdge - self.red) / (self.redEdge + self.red) def _snake_case (self ): return (self.nir - self.green) / (self.nir + self.green) def _snake_case (self ): return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def _snake_case (self ): return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def _snake_case (self ): return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def _snake_case (self ): return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def _snake_case (self , __lowercase=0.0_8 , __lowercase=1.2_2 , __lowercase=0.0_3 ): return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def _snake_case (self ): return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def _snake_case (self ): return (self.nir / 
self.green) - 1 def _snake_case (self ): return (self.nir / self.redEdge) - 1 def _snake_case (self ): return (self.red - self.blue) / self.red def _snake_case (self ): __lowerCAmelCase = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def _snake_case (self ): return self.nir - self.green def _snake_case (self ): return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def _snake_case (self ): __lowerCAmelCase = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red) def _snake_case (self , __lowercase=0.1_6 ): return (self.nir - self.green) / (self.nir + self.green + y) def _snake_case (self , __lowercase=0.5 ): return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def _snake_case (self ): return np.arctan( ((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) ) def _snake_case (self , __lowercase=None , __lowercase=None ): return (self.nir - b) / (a * self.red) def _snake_case (self ): return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def _snake_case (self ): return (self.red + self.green + self.blue) / 3_0.5 def _snake_case (self ): return self.nir / self.red def _snake_case (self ): return (self.rvi() - 1) / (self.rvi() + 1) def _snake_case (self ): return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def _snake_case (self ): return self.green / (self.nir + self.red + self.green) def _snake_case (self ): return self.nir / (self.nir + self.red + self.green) def _snake_case (self ): return self.red / (self.nir + self.red + self.green) def _snake_case (self ): return (self.green - self.red) / (self.green + self.red) def _snake_case (self ): return (self.red - self.green) / (self.red + self.green) def _snake_case (self ): __lowerCAmelCase = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) __lowerCAmelCase = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def _snake_case (self ): return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def _snake_case (self ): return self.nir / self.red def _snake_case (self ): return (self.ndvi() + 0.5) ** (1 / 2) def _snake_case (self ): return (self.nir - self.redEdge) / (self.nir + self.redEdge)
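# The class above dispatches dozens of band-ratio indices through a
# name-to-method map. As a standalone sketch, the most common index, NDVI,
# reduces to one vectorised expression (the band values here are made up):
import numpy as np

nir = np.array([[0.8, 0.6], [0.7, 0.5]])  # near-infrared reflectance
red = np.array([[0.1, 0.2], [0.3, 0.1]])  # red reflectance
ndvi = (nir - red) / (nir + red)
print(ndvi)  # values in (-1, 1); higher suggests denser vegetation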
import math


class SelfOrganizingMap:
    def get_winner(self, weights, sample):
        """Compute the winning cluster by (squared) Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        # The winner is the cluster whose weight vector is closer to the sample.
        return 0 if d0 < d1 else 1

    def update(self, weights, sample, j, alpha):
        """Move the winning weight vector towards the sample."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main():
    # Training examples (m, n)
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # Weight initialization (n, C)
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # Training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # Training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # Classify a test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # Results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
'''simple docstring''' from math import sqrt def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and ( number >= 0 ), "'number' must been an int and positive" __lowerCAmelCase = True # 0 and 1 are none primes. if number <= 1: __lowerCAmelCase = False for divisor in range(2, int(round(sqrt(lowerCamelCase))) + 1): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: __lowerCAmelCase = False break # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'status' must been from type bool" return status def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N __lowerCAmelCase = list(range(2, n + 1)) __lowerCAmelCase = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(lowerCamelCase)): for j in range(i + 1, len(lowerCamelCase)): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): __lowerCAmelCase = 0 # filters actual prime numbers. __lowerCAmelCase = [x for x in begin_list if x != 0] # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type list" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n > 2), "'N' must been an int and > 2" __lowerCAmelCase = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2, n + 1): if is_prime(lowerCamelCase): ans.append(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type list" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and number >= 0, "'number' must been an int and >= 0" __lowerCAmelCase = [] # this list will be returns of the function. # potential prime number factors. 
__lowerCAmelCase = 2 __lowerCAmelCase = number if number == 0 or number == 1: ans.append(lowerCamelCase) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(lowerCamelCase): while quotient != 1: if is_prime(lowerCamelCase) and (quotient % factor == 0): ans.append(lowerCamelCase) quotient /= factor else: factor += 1 else: ans.append(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type list" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and ( number >= 0 ), "'number' bust been an int and >= 0" __lowerCAmelCase = 0 # prime factorization of 'number' __lowerCAmelCase = prime_factorization(lowerCamelCase) __lowerCAmelCase = max(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type int" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and ( number >= 0 ), "'number' bust been an int and >= 0" __lowerCAmelCase = 0 # prime factorization of 'number' __lowerCAmelCase = prime_factorization(lowerCamelCase) __lowerCAmelCase = min(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type int" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase), "'number' must been an int" assert isinstance(number % 2 == 0, lowerCamelCase), "compare bust been from type bool" return number % 2 == 0 def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase), "'number' must been an int" assert isinstance(number % 2 != 0, lowerCamelCase), "compare bust been from type bool" return number % 2 != 0 def __magic_name__( lowerCamelCase): assert ( isinstance(lowerCamelCase, lowerCamelCase) and (number > 2) and is_even(lowerCamelCase) ), "'number' must been an int, even and > 2" __lowerCAmelCase = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' __lowerCAmelCase = get_prime_numbers(lowerCamelCase) __lowerCAmelCase = len(lowerCamelCase) # run variable for while-loops. __lowerCAmelCase = 0 __lowerCAmelCase = None # exit variable. for break up the loops __lowerCAmelCase = True while i < len_pn and loop: __lowerCAmelCase = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: __lowerCAmelCase = False ans.append(prime_numbers[i]) ans.append(prime_numbers[j]) j += 1 i += 1 # precondition assert ( isinstance(lowerCamelCase, lowerCamelCase) and (len(lowerCamelCase) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0]) and is_prime(ans[1]) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( isinstance(lowerCamelCase, lowerCamelCase) and isinstance(lowerCamelCase, lowerCamelCase) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." __lowerCAmelCase = 0 while numbera != 0: __lowerCAmelCase = numbera % numbera __lowerCAmelCase = numbera __lowerCAmelCase = rest # precondition assert isinstance(lowerCamelCase, lowerCamelCase) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( isinstance(lowerCamelCase, lowerCamelCase) and isinstance(lowerCamelCase, lowerCamelCase) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." 
__lowerCAmelCase = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' __lowerCAmelCase = prime_factorization(lowerCamelCase) __lowerCAmelCase = prime_factorization(lowerCamelCase) elif numbera == 1 or numbera == 1: __lowerCAmelCase = [] __lowerCAmelCase = [] __lowerCAmelCase = max(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = 0 __lowerCAmelCase = 0 __lowerCAmelCase = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: __lowerCAmelCase = prime_fac_a.count(lowerCamelCase) __lowerCAmelCase = prime_fac_a.count(lowerCamelCase) for _ in range(max(lowerCamelCase, lowerCamelCase)): ans *= n else: __lowerCAmelCase = prime_fac_a.count(lowerCamelCase) for _ in range(lowerCamelCase): ans *= n done.append(lowerCamelCase) # iterates through primeFac2 for n in prime_fac_a: if n not in done: __lowerCAmelCase = prime_fac_a.count(lowerCamelCase) for _ in range(lowerCamelCase): ans *= n done.append(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'number' must been a positive int" __lowerCAmelCase = 0 __lowerCAmelCase = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(lowerCamelCase): ans += 1 # precondition assert isinstance(lowerCamelCase, lowerCamelCase) and is_prime( lowerCamelCase), "'ans' must been a prime number and from type int" return ans def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( is_prime(lowerCamelCase) and is_prime(lowerCamelCase) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" __lowerCAmelCase = p_number_a + 1 # jump to the next number __lowerCAmelCase = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(lowerCamelCase): number += 1 while number < p_number_a: ans.append(lowerCamelCase) number += 1 # fetch the next prime number. while not is_prime(lowerCamelCase): number += 1 # precondition assert ( isinstance(lowerCamelCase, lowerCamelCase) and ans[0] != p_number_a and ans[len(lowerCamelCase) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 1), "'n' must been int and >= 1" __lowerCAmelCase = [] # will be returned. 
for divisor in range(1, n + 1): if n % divisor == 0: ans.append(lowerCamelCase) # precondition assert ans[0] == 1 and ans[len(lowerCamelCase) - 1] == n, "Error in function getDivisiors(...)" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and ( number > 1 ), "'number' must been an int and >= 1" __lowerCAmelCase = get_divisors(lowerCamelCase) # precondition assert ( isinstance(lowerCamelCase, lowerCamelCase) and (divisors[0] == 1) and (divisors[len(lowerCamelCase) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1]) == number def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( isinstance(lowerCamelCase, lowerCamelCase) and isinstance(lowerCamelCase, lowerCamelCase) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. __lowerCAmelCase = gcd(abs(lowerCamelCase), abs(lowerCamelCase)) # precondition assert ( isinstance(lowerCamelCase, lowerCamelCase) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'n' must been a int and >= 0" __lowerCAmelCase = 1 # this will be return. for factor in range(1, n + 1): ans *= factor return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'n' must been an int and >= 0" __lowerCAmelCase = 0 __lowerCAmelCase = 1 __lowerCAmelCase = 1 # this will be return for _ in range(n - 1): __lowerCAmelCase = ans ans += fiba __lowerCAmelCase = tmp return ans
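# A compact, standalone sketch of the Goldbach decomposition idea used
# earlier in this file (these helper names are hypothetical; primality is
# plain trial division up to the square root):
from math import isqrt


def is_prime_simple(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d != 0 for d in range(2, isqrt(n) + 1))


def goldbach_pair(n: int) -> tuple[int, int]:
    assert n > 2 and n % 2 == 0, "n must be even and > 2"
    for p in range(2, n // 2 + 1):
        if is_prime_simple(p) and is_prime_simple(n - p):
            return (p, n - p)
    raise ValueError("no decomposition found")


print(goldbach_pair(100))  # (3, 97)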
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase : int = logging.get_logger(__name__) _UpperCAmelCase : List[Any] = { """facebook/wav2vec2-base-960h""": """https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json""", # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class a__ ( _a ): """simple docstring""" __UpperCamelCase : str = 'wav2vec2' def __init__(self , __lowercase=32 , __lowercase=7_68 , __lowercase=12 , __lowercase=12 , __lowercase=30_72 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.0_2 , __lowercase=1e-5 , __lowercase="group" , __lowercase="gelu" , __lowercase=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __lowercase=(5, 2, 2, 2, 2, 2, 2) , __lowercase=(10, 3, 3, 3, 3, 2, 2) , __lowercase=False , __lowercase=1_28 , __lowercase=16 , __lowercase=False , __lowercase=True , __lowercase=0.0_5 , __lowercase=10 , __lowercase=2 , __lowercase=0.0 , __lowercase=10 , __lowercase=0 , __lowercase=3_20 , __lowercase=2 , __lowercase=0.1 , __lowercase=1_00 , __lowercase=2_56 , __lowercase=2_56 , __lowercase=0.1 , __lowercase="sum" , __lowercase=False , __lowercase=False , __lowercase=2_56 , __lowercase=(5_12, 5_12, 5_12, 5_12, 15_00) , __lowercase=(5, 3, 3, 1, 1) , __lowercase=(1, 2, 3, 1, 1) , __lowercase=5_12 , __lowercase=0 , __lowercase=1 , __lowercase=2 , __lowercase=False , __lowercase=3 , __lowercase=2 , __lowercase=3 , __lowercase=None , __lowercase=None , **__lowercase , ): super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ ) __lowerCAmelCase = hidden_size __lowerCAmelCase = feat_extract_norm __lowerCAmelCase = feat_extract_activation __lowerCAmelCase = list(snake_case_ ) __lowerCAmelCase = list(snake_case_ ) __lowerCAmelCase = list(snake_case_ ) __lowerCAmelCase = conv_bias __lowerCAmelCase = num_conv_pos_embeddings __lowerCAmelCase = num_conv_pos_embedding_groups __lowerCAmelCase = len(self.conv_dim ) __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = num_attention_heads __lowerCAmelCase = hidden_dropout __lowerCAmelCase = attention_dropout __lowerCAmelCase = activation_dropout __lowerCAmelCase = feat_proj_dropout __lowerCAmelCase = final_dropout __lowerCAmelCase = layerdrop __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = initializer_range __lowerCAmelCase = vocab_size __lowerCAmelCase = do_stable_layer_norm __lowerCAmelCase = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowerCAmelCase = apply_spec_augment __lowerCAmelCase = mask_time_prob __lowerCAmelCase = mask_time_length __lowerCAmelCase = mask_time_min_masks __lowerCAmelCase = mask_feature_prob __lowerCAmelCase = mask_feature_length __lowerCAmelCase = mask_feature_min_masks # parameters for pretraining with codevector quantized representations __lowerCAmelCase = num_codevectors_per_group __lowerCAmelCase = num_codevector_groups __lowerCAmelCase = contrastive_logits_temperature __lowerCAmelCase = feat_quantizer_dropout __lowerCAmelCase = num_negatives __lowerCAmelCase = codevector_dim __lowerCAmelCase = proj_codevector_dim __lowerCAmelCase = diversity_loss_weight # ctc loss __lowerCAmelCase = ctc_loss_reduction __lowerCAmelCase = ctc_zero_infinity # adapter __lowerCAmelCase = add_adapter __lowerCAmelCase = adapter_kernel_size __lowerCAmelCase = adapter_stride __lowerCAmelCase = num_adapter_layers __lowerCAmelCase = output_hidden_size or hidden_size __lowerCAmelCase = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. __lowerCAmelCase = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. __lowerCAmelCase = list(snake_case_ ) __lowerCAmelCase = list(snake_case_ ) __lowerCAmelCase = list(snake_case_ ) __lowerCAmelCase = xvector_output_dim @property def _snake_case (self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
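# The property closing this config reduces the feature-extractor stride tuple
# with multiplication, i.e. the number of raw audio samples per output frame.
# A quick check (this assumes the public `transformers.Wav2Vec2Config` that
# this file corresponds to):
from transformers import Wav2Vec2Config

config = Wav2Vec2Config()  # default strides (5, 2, 2, 2, 2, 2, 2)
assert config.inputs_to_logits_ratio == 5 * 2**6  # == 320 samples per frame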
'''simple docstring''' import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed _UpperCAmelCase : Dict = """true""" def __magic_name__( lowerCamelCase, lowerCamelCase=8_2, lowerCamelCase=1_6): set_seed(4_2) __lowerCAmelCase = RegressionModel() __lowerCAmelCase = deepcopy(lowerCamelCase) __lowerCAmelCase = RegressionDataset(length=lowerCamelCase) __lowerCAmelCase = DataLoader(lowerCamelCase, batch_size=lowerCamelCase) model.to(accelerator.device) __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowerCamelCase, lowerCamelCase) return model, ddp_model, dataloader def __magic_name__( lowerCamelCase, lowerCamelCase=False): __lowerCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''') __lowerCAmelCase = load_dataset('''glue''', '''mrpc''', split='''validation''') def tokenize_function(lowerCamelCase): __lowerCAmelCase = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=lowerCamelCase, max_length=lowerCamelCase) return outputs with accelerator.main_process_first(): __lowerCAmelCase = dataset.map( lowerCamelCase, batched=lowerCamelCase, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], ) __lowerCAmelCase = tokenized_datasets.rename_column('''label''', '''labels''') def collate_fn(lowerCamelCase): if use_longest: return tokenizer.pad(lowerCamelCase, padding='''longest''', return_tensors='''pt''') return tokenizer.pad(lowerCamelCase, padding='''max_length''', max_length=1_2_8, return_tensors='''pt''') return DataLoader(lowerCamelCase, shuffle=lowerCamelCase, collate_fn=lowerCamelCase, batch_size=1_6) def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase = Accelerator(dispatch_batches=lowerCamelCase, split_batches=lowerCamelCase) __lowerCAmelCase = get_dataloader(lowerCamelCase, not dispatch_batches) __lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained( '''hf-internal-testing/mrpc-bert-base-cased''', return_dict=lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowerCamelCase, lowerCamelCase) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = [] for batch in dataloader: __lowerCAmelCase , __lowerCAmelCase = batch.values() with torch.no_grad(): __lowerCAmelCase = model(lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((logit, target)) logits_and_targets.append((logit, target)) __lowerCAmelCase , __lowerCAmelCase = [], [] for logit, targ in logits_and_targets: logits.append(lowerCamelCase) targs.append(lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = torch.cat(lowerCamelCase), torch.cat(lowerCamelCase) return logits, targs def __magic_name__( lowerCamelCase, lowerCamelCase=8_2, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=1_6): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = get_basic_setup(lowerCamelCase, lowerCamelCase, lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = generate_predictions(lowerCamelCase, lowerCamelCase, lowerCamelCase) assert ( len(lowerCamelCase) == num_samples ), 
F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowerCamelCase)}""" def __magic_name__( lowerCamelCase = False, lowerCamelCase = False): __lowerCAmelCase = evaluate.load('''glue''', '''mrpc''') __lowerCAmelCase , __lowerCAmelCase = get_mrpc_setup(lowerCamelCase, lowerCamelCase) # First do baseline __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup['''no'''] model.to(lowerCamelCase) model.eval() for batch in dataloader: batch.to(lowerCamelCase) with torch.inference_mode(): __lowerCAmelCase = model(**lowerCamelCase) __lowerCAmelCase = outputs.logits.argmax(dim=-1) metric.add_batch(predictions=lowerCamelCase, references=batch['''labels''']) __lowerCAmelCase = metric.compute() # Then do distributed __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup['''ddp'''] model.eval() for batch in dataloader: with torch.inference_mode(): __lowerCAmelCase = model(**lowerCamelCase) __lowerCAmelCase = outputs.logits.argmax(dim=-1) __lowerCAmelCase = batch['''labels'''] __lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((preds, references)) metric.add_batch(predictions=lowerCamelCase, references=lowerCamelCase) __lowerCAmelCase = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key], distributed[key]), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n""" def __magic_name__( ): __lowerCAmelCase = Accelerator(split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('''**Testing gather_for_metrics**''') for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""") test_mrpc(lowerCamelCase, lowerCamelCase) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test torch metrics**''') for split_batches in [True, False]: for dispatch_batches in [True, False]: __lowerCAmelCase = Accelerator(split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase) if accelerator.is_local_main_process: print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""") test_torch_metrics(lowerCamelCase, 9_9) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test last batch is not dropped when perfectly divisible**''') __lowerCAmelCase = Accelerator() test_torch_metrics(lowerCamelCase, 5_1_2) accelerator.state._reset_state() def __magic_name__( lowerCamelCase): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
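# The behaviour under test is `Accelerator.gather_for_metrics`, which gathers
# per-process tensors and trims the duplicate samples that padding of the last
# batch introduces. A minimal sketch of the evaluation-loop pattern (`model`
# and `dataloader` are placeholders for objects already passed through
# `accelerator.prepare`):
import torch
from accelerate import Accelerator


def gathered_predictions(model, dataloader, accelerator: Accelerator):
    """Collect argmax predictions and labels across all processes."""
    model.eval()
    preds, targets = [], []
    for batch in dataloader:
        with torch.inference_mode():
            logits = model(**batch).logits
        p, t = accelerator.gather_for_metrics((logits.argmax(dim=-1), batch["labels"]))
        preds.append(p)
        targets.append(t)
    return torch.cat(preds), torch.cat(targets)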
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase : List[Any] = logging.get_logger(__name__) def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = original_name.split('''.''')[0] __lowerCAmelCase = key.split('''.''') __lowerCAmelCase = int(key_list[key_list.index(__lowerCAmelCase) - 2]) __lowerCAmelCase = int(key_list[key_list.index(__lowerCAmelCase) - 1]) __lowerCAmelCase = orig_block_num - offset __lowerCAmelCase = key.replace(F"""{orig_block_num}.{layer_num}.{original_name}""", F"""block.{new_block_num}.{layer_num}.{new_name}""") return key def __magic_name__( lowerCamelCase): __lowerCAmelCase = OrderedDict() __lowerCAmelCase = 0, 0 for key, value in state_dict.items(): if key.startswith('''network'''): __lowerCAmelCase = key.replace('''network''', '''poolformer.encoder''') if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('''bias''') and "patch_embed" not in key: patch_emb_offset += 1 __lowerCAmelCase = key[: key.find('''proj''')] __lowerCAmelCase = key.replace(__lowerCAmelCase, F"""patch_embeddings.{total_embed_found}.""") __lowerCAmelCase = key.replace('''proj''', '''projection''') if key.endswith('''bias'''): total_embed_found += 1 if "patch_embeddings" in key: __lowerCAmelCase = '''poolformer.encoder.''' + key if "mlp.fc1" in key: __lowerCAmelCase = replace_key_with_offset(__lowerCAmelCase, __lowerCAmelCase, '''mlp.fc1''', '''output.conv1''') if "mlp.fc2" in key: __lowerCAmelCase = replace_key_with_offset(__lowerCAmelCase, __lowerCAmelCase, '''mlp.fc2''', '''output.conv2''') if "norm1" in key: __lowerCAmelCase = replace_key_with_offset(__lowerCAmelCase, __lowerCAmelCase, '''norm1''', '''before_norm''') if "norm2" in key: __lowerCAmelCase = replace_key_with_offset(__lowerCAmelCase, __lowerCAmelCase, '''norm2''', '''after_norm''') if "layer_scale_1" in key: __lowerCAmelCase = replace_key_with_offset(__lowerCAmelCase, __lowerCAmelCase, '''layer_scale_1''', '''layer_scale_1''') if "layer_scale_2" in key: __lowerCAmelCase = replace_key_with_offset(__lowerCAmelCase, __lowerCAmelCase, '''layer_scale_2''', '''layer_scale_2''') if "head" in key: __lowerCAmelCase = key.replace('''head''', '''classifier''') __lowerCAmelCase = value return new_state_dict def __magic_name__( ): __lowerCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __lowerCAmelCase = Image.open(requests.get(__lowerCAmelCase, stream=__lowerCAmelCase).raw) return image @torch.no_grad() def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = PoolFormerConfig() # set attributes based on model_name __lowerCAmelCase = '''huggingface/label-files''' __lowerCAmelCase = model_name[-3:] __lowerCAmelCase = 1_0_0_0 __lowerCAmelCase = '''imagenet-1k-id2label.json''' __lowerCAmelCase = (1, 1_0_0_0) # set config attributes __lowerCAmelCase = json.load(open(hf_hub_download(__lowerCAmelCase, __lowerCAmelCase, repo_type='''dataset'''), '''r''')) __lowerCAmelCase = {int(__lowerCAmelCase): v for k, v in idalabel.items()} __lowerCAmelCase = idalabel __lowerCAmelCase = {v: k for k, v in idalabel.items()} if size == "s12": __lowerCAmelCase = [2, 2, 6, 2] 
__lowerCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] __lowerCAmelCase = 4.0 __lowerCAmelCase = 0.9 elif size == "s24": __lowerCAmelCase = [4, 4, 1_2, 4] __lowerCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] __lowerCAmelCase = 4.0 __lowerCAmelCase = 0.9 elif size == "s36": __lowerCAmelCase = [6, 6, 1_8, 6] __lowerCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] __lowerCAmelCase = 4.0 __lowerCAmelCase = 1E-6 __lowerCAmelCase = 0.9 elif size == "m36": __lowerCAmelCase = [6, 6, 1_8, 6] __lowerCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] __lowerCAmelCase = 4.0 __lowerCAmelCase = 1E-6 __lowerCAmelCase = 0.95 elif size == "m48": __lowerCAmelCase = [8, 8, 2_4, 8] __lowerCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] __lowerCAmelCase = 4.0 __lowerCAmelCase = 1E-6 __lowerCAmelCase = 0.95 else: raise ValueError(F"""Size {size} not supported""") # load image processor __lowerCAmelCase = PoolFormerImageProcessor(crop_pct=__lowerCAmelCase) # Prepare image __lowerCAmelCase = prepare_img() __lowerCAmelCase = image_processor(images=__lowerCAmelCase, return_tensors='''pt''').pixel_values logger.info(F"""Converting model {model_name}...""") # load original state dict __lowerCAmelCase = torch.load(__lowerCAmelCase, map_location=torch.device('''cpu''')) # rename keys __lowerCAmelCase = rename_keys(__lowerCAmelCase) # create HuggingFace model and load state dict __lowerCAmelCase = PoolFormerForImageClassification(__lowerCAmelCase) model.load_state_dict(__lowerCAmelCase) model.eval() # Define image processor __lowerCAmelCase = PoolFormerImageProcessor(crop_pct=__lowerCAmelCase) __lowerCAmelCase = image_processor(images=prepare_img(), return_tensors='''pt''').pixel_values # forward pass __lowerCAmelCase = model(__lowerCAmelCase) __lowerCAmelCase = outputs.logits # define expected logit slices for different models if size == "s12": __lowerCAmelCase = torch.tensor([-0.30_45, -0.67_58, -0.48_69]) elif size == "s24": __lowerCAmelCase = torch.tensor([0.44_02, -0.13_74, -0.80_45]) elif size == "s36": __lowerCAmelCase = torch.tensor([-0.60_80, -0.51_33, -0.58_98]) elif size == "m36": __lowerCAmelCase = torch.tensor([0.39_52, 0.22_63, -1.26_68]) elif size == "m48": __lowerCAmelCase = torch.tensor([0.11_67, -0.06_56, -0.34_23]) else: raise ValueError(F"""Size {size} not supported""") # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3], __lowerCAmelCase, atol=1E-2) # finally, save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""") Path(__lowerCAmelCase).mkdir(exist_ok=__lowerCAmelCase) model.save_pretrained(__lowerCAmelCase) print(F"""Saving image processor to {pytorch_dump_folder_path}""") image_processor.save_pretrained(__lowerCAmelCase) if __name__ == "__main__": _UpperCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument( """--model_name""", default="""poolformer_s12""", type=str, help="""Name of the model you\'d like to convert.""", ) parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) _UpperCAmelCase : Any = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
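# A minimal usage sketch (this assumes the public `transformers` exports the
# file corresponds to; the model is randomly initialised, not pretrained):
from transformers import RobertaConfig, RobertaModel

config = RobertaConfig()      # roberta-base style defaults
model = RobertaModel(config)  # random weights, architecture from the config
print(config.hidden_size)     # 768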
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, h(n) = n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
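# A quick check of the closed form h(n) = n * (2n - 1) against the list
# builder above (note the sequence starts at n = 0):
assert hexagonal_numbers(length=5) == [0, 1, 6, 15, 28]
assert all(h == n * (2 * n - 1) for n, h in enumerate(hexagonal_numbers(10)))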
'''simple docstring''' import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase = old_name if "patch_embed" in old_name: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = old_name.split('''.''') if layer == "0": __lowerCAmelCase = old_name.replace('''0''', '''convolution1''') elif layer == "1": __lowerCAmelCase = old_name.replace('''1''', '''batchnorm_before''') elif layer == "3": __lowerCAmelCase = old_name.replace('''3''', '''convolution2''') else: __lowerCAmelCase = old_name.replace('''4''', '''batchnorm_after''') if "network" in old_name and re.search(r'''\d\.\d''', lowerCamelCase): __lowerCAmelCase = r'''\b\d{2}\b''' if bool(re.search(lowerCamelCase, lowerCamelCase)): __lowerCAmelCase = re.search(r'''\d\.\d\d.''', lowerCamelCase).group() else: __lowerCAmelCase = re.search(r'''\d\.\d.''', lowerCamelCase).group() if int(match[0]) < 6: __lowerCAmelCase = old_name.replace(lowerCamelCase, '''''') __lowerCAmelCase = trimmed_name.replace('''network''', match[0] + '''.meta4D_layers.blocks.''' + match[2:-1]) __lowerCAmelCase = '''intermediate_stages.''' + trimmed_name else: __lowerCAmelCase = old_name.replace(lowerCamelCase, '''''') if int(match[2]) < num_meta4D_last_stage: __lowerCAmelCase = trimmed_name.replace('''network''', '''meta4D_layers.blocks.''' + match[2]) else: __lowerCAmelCase = str(int(match[2]) - num_meta4D_last_stage) __lowerCAmelCase = trimmed_name.replace('''network''', '''meta3D_layers.blocks.''' + layer_index) if "norm1" in old_name: __lowerCAmelCase = trimmed_name.replace('''norm1''', '''layernorm1''') elif "norm2" in old_name: __lowerCAmelCase = trimmed_name.replace('''norm2''', '''layernorm2''') elif "fc1" in old_name: __lowerCAmelCase = trimmed_name.replace('''fc1''', '''linear_in''') elif "fc2" in old_name: __lowerCAmelCase = trimmed_name.replace('''fc2''', '''linear_out''') __lowerCAmelCase = '''last_stage.''' + trimmed_name elif "network" in old_name and re.search(r'''.\d.''', lowerCamelCase): __lowerCAmelCase = old_name.replace('''network''', '''intermediate_stages''') if "fc" in new_name: __lowerCAmelCase = new_name.replace('''fc''', '''convolution''') elif ("norm1" in new_name) and ("layernorm1" not in new_name): __lowerCAmelCase = new_name.replace('''norm1''', '''batchnorm_before''') elif ("norm2" in new_name) and ("layernorm2" not in new_name): __lowerCAmelCase = new_name.replace('''norm2''', '''batchnorm_after''') if "proj" in new_name: __lowerCAmelCase = new_name.replace('''proj''', '''projection''') if "dist_head" in new_name: __lowerCAmelCase = new_name.replace('''dist_head''', '''distillation_classifier''') elif "head" in new_name: __lowerCAmelCase = new_name.replace('''head''', '''classifier''') elif "patch_embed" in new_name: __lowerCAmelCase = '''efficientformer.''' + new_name elif new_name == "norm.weight" or new_name == "norm.bias": __lowerCAmelCase = new_name.replace('''norm''', '''layernorm''') __lowerCAmelCase = '''efficientformer.''' + new_name else: __lowerCAmelCase = '''efficientformer.encoder.''' + new_name return new_name def __magic_name__( lowerCamelCase, lowerCamelCase): for key in checkpoint.copy().keys(): 
__lowerCAmelCase = checkpoint.pop(lowerCamelCase) __lowerCAmelCase = val return checkpoint def __magic_name__( ): __lowerCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __lowerCAmelCase = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase).raw) return image def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = torch.load(lowerCamelCase, map_location='''cpu''')['''model'''] __lowerCAmelCase = EfficientFormerConfig.from_json_file(lowerCamelCase) __lowerCAmelCase = EfficientFormerForImageClassificationWithTeacher(lowerCamelCase) __lowerCAmelCase = '''_'''.join(checkpoint_path.split('''/''')[-1].split('''.''')[0].split('''_''')[:-1]) __lowerCAmelCase = config.depths[-1] - config.num_metaad_blocks + 1 __lowerCAmelCase = convert_torch_checkpoint(lowerCamelCase, lowerCamelCase) model.load_state_dict(lowerCamelCase) model.eval() __lowerCAmelCase = { '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } # prepare image __lowerCAmelCase = prepare_img() __lowerCAmelCase = 2_5_6 __lowerCAmelCase = 2_2_4 __lowerCAmelCase = EfficientFormerImageProcessor( size={'''shortest_edge''': image_size}, crop_size={'''height''': crop_size, '''width''': crop_size}, resample=pillow_resamplings['''bicubic'''], ) __lowerCAmelCase = processor(images=lowerCamelCase, return_tensors='''pt''').pixel_values # original processing pipeline __lowerCAmelCase = Compose( [ Resize(lowerCamelCase, interpolation=pillow_resamplings['''bicubic''']), CenterCrop(lowerCamelCase), ToTensor(), Normalize(lowerCamelCase, lowerCamelCase), ]) __lowerCAmelCase = image_transforms(lowerCamelCase).unsqueeze(0) assert torch.allclose(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = model(lowerCamelCase) __lowerCAmelCase = outputs.logits __lowerCAmelCase = (1, 1_0_0_0) if "l1" in model_name: __lowerCAmelCase = torch.Tensor( [-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28]) assert torch.allclose(logits[0, :1_0], lowerCamelCase, atol=1E-3) assert logits.shape == expected_shape elif "l3" in model_name: __lowerCAmelCase = torch.Tensor( [-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27]) assert torch.allclose(logits[0, :1_0], lowerCamelCase, atol=1E-3) assert logits.shape == expected_shape elif "l7" in model_name: __lowerCAmelCase = torch.Tensor( [-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78]) assert logits.shape == expected_shape else: raise ValueError( F"""Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7""") # Save Checkpoints Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase) model.save_pretrained(lowerCamelCase) print(F"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""")
processor.save_pretrained(lowerCamelCase) print(F"""Processor successfully saved at {pytorch_dump_path}""") if push_to_hub: print('''Pushing model to the hub...''') model.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add model''', use_temp_dir=lowerCamelCase, ) processor.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add image processor''', use_temp_dir=lowerCamelCase, ) if __name__ == "__main__": _UpperCAmelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--pytorch_model_path""", default=None, type=str, required=True, help="""Path to EfficientFormer pytorch checkpoint.""", ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The json file for EfficientFormer model config.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) parser.set_defaults(push_to_hub=True) _UpperCAmelCase : List[str] = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
9
0
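# A minimal sketch of the key-renaming pattern the conversion script above uses:
# walk the source state dict, map each key to its new name, and re-insert the
# tensor under that name. The single regex rule here is an illustrative
# stand-in for the script's full rename logic.
import re

def rename_key(old_name: str) -> str:
    # e.g. "patch_embed.0.weight" -> "patch_embed.convolution1.weight"
    return re.sub(r"patch_embed\.0\.", "patch_embed.convolution1.", old_name)

def convert_state_dict(checkpoint: dict) -> dict:
    converted = {}
    for key in list(checkpoint):
        converted[rename_key(key)] = checkpoint[key]
    return converted

assert convert_state_dict({"patch_embed.0.weight": 0}) == {"patch_embed.convolution1.weight": 0}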
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class a__ : """simple docstring""" def __init__(self , __lowercase , __lowercase=12 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=99 , __lowercase=32 , __lowercase=32 , __lowercase=2 , __lowercase=4 , __lowercase=37 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_12 , __lowercase=0.0_2 , __lowercase=0 , __lowercase=None , ): __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = seq_length __lowerCAmelCase = is_training __lowerCAmelCase = use_input_mask __lowerCAmelCase = use_labels __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = projection_dim __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = dropout __lowerCAmelCase = attention_dropout __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = initializer_range __lowerCAmelCase = scope __lowerCAmelCase = bos_token_id def _snake_case (self ): __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase = None if self.use_input_mask: __lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: __lowerCAmelCase = input_mask.numpy() __lowerCAmelCase , __lowerCAmelCase = input_mask.shape __lowerCAmelCase = np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__SCREAMING_SNAKE_CASE ): __lowerCAmelCase = 1 __lowerCAmelCase = 0 __lowerCAmelCase = self.get_config() return config, input_ids, tf.convert_to_tensor(__SCREAMING_SNAKE_CASE ) def _snake_case (self ): return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def _snake_case (self , __lowercase , __lowercase , __lowercase ): __lowerCAmelCase = TFBlipTextModel(config=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _snake_case (self ): __lowerCAmelCase = self.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs __lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class a__ ( lowerCAmelCase_ , unittest.TestCase ): """simple docstring""" 
__UpperCamelCase : Optional[int] = (TFBlipTextModel,) if is_tf_available() else () __UpperCamelCase : int = False __UpperCamelCase : List[Any] = False __UpperCamelCase : List[Any] = False def _snake_case (self ): __lowerCAmelCase = BlipTextModelTester(self ) __lowerCAmelCase = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def _snake_case (self ): self.config_tester.run_common_tests() def _snake_case (self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def _snake_case (self ): pass def _snake_case (self ): pass @unittest.skip(reason='''Blip does not use inputs_embeds''' ) def _snake_case (self ): pass @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' ) def _snake_case (self ): pass @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' ) def _snake_case (self ): pass @slow def _snake_case (self ): for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase = TFBlipTextModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def _snake_case (self , __lowercase=True ): super().test_pt_tf_model_equivalence(allow_missing_keys=__SCREAMING_SNAKE_CASE )
351
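# The attention-mask construction in prepare_config_and_inputs above, restated
# in plain numpy: each row is all ones up to a random start index and zeros
# after it. Illustrative only.
import numpy as np

batch_size, seq_length = 2, 7
starts = np.random.randint(1, seq_length - 1, size=(batch_size,))
mask = np.zeros((batch_size, seq_length), dtype=np.int64)
for batch_idx, start_index in enumerate(starts):
    mask[batch_idx, :start_index] = 1
    mask[batch_idx, start_index:] = 0
assert all(mask[i, : starts[i]].all() and not mask[i, starts[i]:].any()
           for i in range(batch_size))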
'''simple docstring''' from __future__ import annotations import math def __magic_name__( lowerCamelCase, lowerCamelCase): if len(lowerCamelCase) != 2 or len(a[0]) != 2 or len(lowerCamelCase) != 2 or len(b[0]) != 2: raise Exception('''Matrices are not 2x2''') __lowerCAmelCase = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def __magic_name__( lowerCamelCase, lowerCamelCase): return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))] for row in range(len(lowerCamelCase)) ] def __magic_name__( lowerCamelCase, lowerCamelCase): return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))] for row in range(len(lowerCamelCase)) ] def __magic_name__( lowerCamelCase): if len(lowerCamelCase) % 2 != 0 or len(a[0]) % 2 != 0: raise Exception('''Odd matrices are not supported!''') __lowerCAmelCase = len(lowerCamelCase) __lowerCAmelCase = matrix_length // 2 __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase, lowerCamelCase)] for i in range(lowerCamelCase)] __lowerCAmelCase = [ [a[i][j] for j in range(lowerCamelCase, lowerCamelCase)] for i in range(lowerCamelCase, lowerCamelCase) ] __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase)] for i in range(lowerCamelCase)] __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase)] for i in range(lowerCamelCase, lowerCamelCase)] return top_left, top_right, bot_left, bot_right def __magic_name__( lowerCamelCase): return len(lowerCamelCase), len(matrix[0]) def __magic_name__( lowerCamelCase): print('''\n'''.join(str(lowerCamelCase) for line in matrix)) def __magic_name__( lowerCamelCase, lowerCamelCase): if matrix_dimensions(lowerCamelCase) == (2, 2): return default_matrix_multiplication(lowerCamelCase, lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase) __lowerCAmelCase = actual_strassen(lowerCamelCase, matrix_subtraction(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase) __lowerCAmelCase = actual_strassen(lowerCamelCase, matrix_subtraction(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase), lowerCamelCase) __lowerCAmelCase = matrix_addition(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = matrix_addition(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase), lowerCamelCase) # construct the new matrix from our 4 quadrants __lowerCAmelCase = [] for i in range(len(lowerCamelCase)): new_matrix.append(top_left[i] + top_right[i]) for i in range(len(lowerCamelCase)): new_matrix.append(bot_left[i] + bot_right[i]) return 
new_matrix def __magic_name__( lowerCamelCase, lowerCamelCase): if matrix_dimensions(lowerCamelCase)[1] != matrix_dimensions(lowerCamelCase)[0]: __lowerCAmelCase = ( '''Unable to multiply these matrices, please check the dimensions.\n''' F"""Matrix A: {matrixa}\n""" F"""Matrix B: {matrixa}""" ) raise Exception(lowerCamelCase) __lowerCAmelCase = matrix_dimensions(lowerCamelCase) __lowerCAmelCase = matrix_dimensions(lowerCamelCase) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] __lowerCAmelCase = max(*lowerCamelCase, *lowerCamelCase) __lowerCAmelCase = int(math.pow(2, math.ceil(math.loga(lowerCamelCase)))) __lowerCAmelCase = matrixa __lowerCAmelCase = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0, lowerCamelCase): if i < dimensiona[0]: for _ in range(dimensiona[1], lowerCamelCase): new_matrixa[i].append(0) else: new_matrixa.append([0] * maxim) if i < dimensiona[0]: for _ in range(dimensiona[1], lowerCamelCase): new_matrixa[i].append(0) else: new_matrixa.append([0] * maxim) __lowerCAmelCase = actual_strassen(lowerCamelCase, lowerCamelCase) # Removing the additional zeros for i in range(0, lowerCamelCase): if i < dimensiona[0]: for _ in range(dimensiona[1], lowerCamelCase): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": _UpperCAmelCase : List[str] = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] _UpperCAmelCase : Optional[Any] = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]] print(strassen(matrixa, matrixa))
9
0
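# A quick cross-check for the Strassen routine above (whose padded entry point
# is called `strassen` in the sample's __main__ block): a naive O(n^3)
# multiply over small operands gives the values any correct implementation
# must reproduce.
def naive_multiply(a, b):
    return [
        [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
        for i in range(len(a))
    ]

a = [[1, 2], [3, 4]]
b = [[5, 6], [7, 8]]
assert naive_multiply(a, b) == [[19, 22], [43, 50]]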
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) _UpperCAmelCase : Tuple = { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""", """umberto-commoncrawl-cased-v1""": ( """https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json""" ), """umberto-wikipedia-uncased-v1""": ( """https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json""" ), } class a__ ( snake_case_ ): """simple docstring""" __UpperCamelCase : str = 'camembert' def __init__(self , __lowercase=3_05_22 , __lowercase=7_68 , __lowercase=12 , __lowercase=12 , __lowercase=30_72 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_12 , __lowercase=2 , __lowercase=0.0_2 , __lowercase=1e-12 , __lowercase=1 , __lowercase=0 , __lowercase=2 , __lowercase="absolute" , __lowercase=True , __lowercase=None , **__lowercase , ): super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = hidden_act __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = type_vocab_size __lowerCAmelCase = initializer_range __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = position_embedding_type __lowerCAmelCase = use_cache __lowerCAmelCase = classifier_dropout class a__ ( snake_case_ ): """simple docstring""" @property def _snake_case (self ): if self.task == "multiple-choice": __lowerCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __lowerCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
352
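# The ONNX input spec above, spelled out: batch and sequence are dynamic axes,
# and multiple-choice inputs add a choice axis in position 1. Minimal sketch of
# the same mapping.
from collections import OrderedDict

def onnx_inputs(task: str) -> "OrderedDict[str, dict]":
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

assert onnx_inputs("default")["input_ids"] == {0: "batch", 1: "sequence"}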
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): __lowerCAmelCase = tempfile.mkdtemp() # fmt: off __lowerCAmelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) __lowerCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowerCAmelCase = {'''unk_token''': '''<unk>'''} __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowercase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowercase ) ) __lowerCAmelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } __lowerCAmelCase = os.path.join(self.tmpdirname , __lowercase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowercase , __lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , **__lowercase ): return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self ): shutil.rmtree(self.tmpdirname ) def _snake_case (self ): __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCAmelCase = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase ) __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowercase ) 
self.assertIsInstance(processor_fast.tokenizer , __lowercase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowercase ) self.assertIsInstance(processor_fast.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCAmelCase = self.get_image_processor(do_normalize=__lowercase , padding_value=1.0 ) __lowerCAmelCase = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowercase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = image_processor(__lowercase , return_tensors='''np''' ) __lowerCAmelCase = processor(images=__lowercase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = processor(text=__lowercase ) __lowerCAmelCase = tokenizer(__lowercase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(text=__lowercase , images=__lowercase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCAmelCase = processor.batch_decode(__lowercase ) __lowerCAmelCase = tokenizer.batch_decode(__lowercase ) self.assertListEqual(__lowercase , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(text=__lowercase , images=__lowercase ) self.assertListEqual(list(inputs.keys() ) 
, processor.model_input_names )
9
0
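# The save/reload round-trip the processor tests above lean on, reduced to its
# json core: dump a config to a temp dir, read it back, compare. The filename
# here stands in for transformers' IMAGE_PROCESSOR_NAME constant.
import json, os, tempfile

config = {"do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18}
with tempfile.TemporaryDirectory() as tmpdir:
    path = os.path.join(tmpdir, "preprocessor_config.json")
    with open(path, "w", encoding="utf-8") as fp:
        json.dump(config, fp)
    with open(path, encoding="utf-8") as fp:
        reloaded = json.load(fp)
assert reloaded == config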
'''simple docstring''' import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() _UpperCAmelCase : List[str] = logging.get_logger(__name__) _UpperCAmelCase : List[Any] = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } _UpperCAmelCase : List[str] = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase): for attribute in key.split('''.'''): __lowerCAmelCase = getattr(_lowerCAmelCase, _lowerCAmelCase) if weight_type is not None: __lowerCAmelCase = getattr(_lowerCAmelCase, _lowerCAmelCase).shape else: __lowerCAmelCase = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowerCAmelCase = value elif weight_type == "weight_g": __lowerCAmelCase = value elif weight_type == "weight_v": __lowerCAmelCase = value elif weight_type == "bias": __lowerCAmelCase = value else: __lowerCAmelCase = value logger.info(F"""{key + "."
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""") def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase = [] __lowerCAmelCase = fairseq_model.state_dict() __lowerCAmelCase = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight __lowerCAmelCase = None for name, value in fairseq_dict.items(): __lowerCAmelCase = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, hf_model.config.feat_extract_norm == '''group''', ) __lowerCAmelCase = True elif name.split('''.''')[0] == "proj": __lowerCAmelCase = fairseq_model.proj __lowerCAmelCase = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('''w2v_model.''')[-1] == name.split('''.''')[0]: __lowerCAmelCase = True if "*" in mapped_key: __lowerCAmelCase = name.split(_lowerCAmelCase)[0].split('''.''')[-2] __lowerCAmelCase = mapped_key.replace('''*''', _lowerCAmelCase) if "weight_g" in name: __lowerCAmelCase = '''weight_g''' elif "weight_v" in name: __lowerCAmelCase = '''weight_v''' elif "bias" in name: __lowerCAmelCase = '''bias''' elif "weight" in name: __lowerCAmelCase = '''weight''' else: __lowerCAmelCase = None set_recursively(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase) continue if not is_used: unused_weights.append(_lowerCAmelCase) logger.warning(F"""Unused weights: {unused_weights}""") return proj_weight def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = full_name.split('''conv_layers.''')[-1] __lowerCAmelCase = name.split('''.''') __lowerCAmelCase = int(items[0]) __lowerCAmelCase = int(items[1]) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowerCAmelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowerCAmelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""") elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) __lowerCAmelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowerCAmelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""") else: unused_weights.append(_lowerCAmelCase) def __magic_name__( lowerCamelCase): __lowerCAmelCase , __lowerCAmelCase = emb.weight.shape __lowerCAmelCase = nn.Linear(_lowerCAmelCase, _lowerCAmelCase, bias=_lowerCAmelCase) __lowerCAmelCase = emb.weight.data return lin_layer def __magic_name__( lowerCamelCase): with open(_lowerCAmelCase, '''r''', encoding='''utf-8''') as f: __lowerCAmelCase = f.readlines() __lowerCAmelCase = [line.split(''' ''')[0] for line in lines] __lowerCAmelCase = len(_lowerCAmelCase) __lowerCAmelCase = { '''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3, } vocab_dict.update(dict(zip(_lowerCAmelCase, range(4, num_words + 4)))) return vocab_dict @torch.no_grad() def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ): __lowerCAmelCase = WavaVecaConfig.from_pretrained(_lowerCAmelCase) __lowerCAmelCase = SpeechaTextaConfig.from_pretrained( _lowerCAmelCase, vocab_size=_lowerCAmelCase, decoder_layers=_lowerCAmelCase, do_stable_layer_norm=_lowerCAmelCase) __lowerCAmelCase = WavaVecaFeatureExtractor( feature_size=1, sampling_rate=1_6_0_0_0, padding_value=0, do_normalize=_lowerCAmelCase, return_attention_mask=_lowerCAmelCase, ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''')[:-1])}) __lowerCAmelCase = model[0].eval() # set weights for wav2vec2 encoder __lowerCAmelCase = WavaVecaModel(_lowerCAmelCase) __lowerCAmelCase = recursively_load_weights_wavaveca(model.encoder, _lowerCAmelCase) __lowerCAmelCase = SpeechaTextaForCausalLM(_lowerCAmelCase) __lowerCAmelCase , __lowerCAmelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=_lowerCAmelCase) # set output linear layer unexpected_keys.remove('''embed_out''') __lowerCAmelCase = nn.Parameter(model.decoder.embed_out.detach()) # layer norm is init to identity matrix so leaving it is fine logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""") logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""") __lowerCAmelCase = SpeechEncoderDecoderModel(encoder=_lowerCAmelCase, decoder=_lowerCAmelCase) __lowerCAmelCase = False # add projection layer __lowerCAmelCase = nn.Parameter(projection_layer.weight) __lowerCAmelCase = nn.Parameter(projection_layer.bias) __lowerCAmelCase = create_vocab_dict(_lowerCAmelCase) with open(os.path.join(_lowerCAmelCase, '''vocab.json'''), '''w''') as fp: json.dump(_lowerCAmelCase, _lowerCAmelCase) __lowerCAmelCase = SpeechaTextaTokenizer(os.path.join(_lowerCAmelCase, '''vocab.json''')) tokenizer.save_pretrained(_lowerCAmelCase) __lowerCAmelCase = hf_wavavec.config.to_dict() __lowerCAmelCase = tokenizer.pad_token_id __lowerCAmelCase = tokenizer.bos_token_id __lowerCAmelCase = tokenizer.eos_token_id __lowerCAmelCase = '''speech_to_text_2''' __lowerCAmelCase = 
'''wav2vec2''' __lowerCAmelCase = SpeechEncoderDecoderConfig.from_dict(_lowerCAmelCase) hf_wavavec.save_pretrained(_lowerCAmelCase) feature_extractor.save_pretrained(_lowerCAmelCase) if __name__ == "__main__": _UpperCAmelCase : List[str] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument( """--encoder_config_path""", default="""facebook/wav2vec2-large-lv60""", type=str, help="""Path to hf encoder wav2vec2 checkpoint config""", ) parser.add_argument( """--decoder_config_path""", default="""facebook/s2t-small-mustc-en-fr-st""", type=str, help="""Path to hf decoder s2t checkpoint config""", ) parser.add_argument("""--vocab_size""", default=1_0_2_2_4, type=int, help="""Vocab size of decoder""") parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""") _UpperCAmelCase : Optional[int] = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
353
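# The create_vocab_dict step above in miniature: a fairseq dict file has one
# "<token> <count>" pair per line; the four special tokens take ids 0-3 and
# dictionary words follow from id 4.
lines = ["hello 42", "world 7"]
words = [line.split(" ")[0] for line in lines]
vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
vocab.update({word: idx for idx, word in enumerate(words, start=4)})
assert vocab == {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}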
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class a__ ( __A ): """simple docstring""" def __init__(self , __lowercase , __lowercase=None , __lowercase=None , __lowercase=0 ): __lowerCAmelCase = 1.0 if scale is None else scale __lowerCAmelCase = 0.0 if loc is None else loc super().__init__(__lowercase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__lowercase )] ) @property def _snake_case (self ): return self.base_dist.mean * self.scale + self.loc @property def _snake_case (self ): return self.base_dist.variance * self.scale**2 @property def _snake_case (self ): return self.variance.sqrt() class a__ ( nn.Module ): """simple docstring""" def __init__(self , __lowercase , __lowercase , __lowercase , **__lowercase ): super().__init__(**__lowercase ) __lowerCAmelCase = args_dim __lowerCAmelCase = nn.ModuleList([nn.Linear(__lowercase , __lowercase ) for dim in args_dim.values()] ) __lowerCAmelCase = domain_map def _snake_case (self , __lowercase ): __lowerCAmelCase = [proj(__lowercase ) for proj in self.proj] return self.domain_map(*__lowercase ) class a__ ( nn.Module ): """simple docstring""" def __init__(self , __lowercase ): super().__init__() __lowerCAmelCase = function def _snake_case (self , __lowercase , *__lowercase ): return self.function(__lowercase , *__lowercase ) class a__ : """simple docstring""" __UpperCamelCase : type __UpperCamelCase : int __UpperCamelCase : Dict[str, int] def __init__(self , __lowercase = 1 ): __lowerCAmelCase = dim __lowerCAmelCase = {k: dim * self.args_dim[k] for k in self.args_dim} def _snake_case (self , __lowercase ): if self.dim == 1: return self.distribution_class(*__lowercase ) else: return Independent(self.distribution_class(*__lowercase ) , 1 ) def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None , ): __lowerCAmelCase = self._base_distribution(__lowercase ) if loc is None and scale is None: return distr else: return AffineTransformed(__lowercase , loc=__lowercase , scale=__lowercase , event_dim=self.event_dim ) @property def _snake_case (self ): return () if self.dim == 1 else (self.dim,) @property def _snake_case (self ): return len(self.event_shape ) @property def _snake_case (self ): return 0.0 def _snake_case (self , __lowercase ): return ParameterProjection( in_features=__lowercase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def _snake_case (self , *__lowercase ): raise NotImplementedError() @staticmethod def _snake_case (__lowercase ): return (x + torch.sqrt(torch.square(__lowercase ) + 4.0 )) / 2.0 class a__ ( __A ): """simple docstring""" __UpperCamelCase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} __UpperCamelCase : type = StudentT @classmethod def _snake_case (cls , __lowercase , __lowercase , __lowercase ): __lowerCAmelCase = cls.squareplus(__lowercase ).clamp_min(torch.finfo(scale.dtype ).eps ) __lowerCAmelCase = 2.0 + cls.squareplus(__lowercase ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class a__ ( __A ): """simple docstring""" __UpperCamelCase : Dict[str, int] = {"loc": 1, "scale": 1} __UpperCamelCase : type = Normal @classmethod def _snake_case (cls , __lowercase , __lowercase ): __lowerCAmelCase = cls.squareplus(__lowercase ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class a__ ( __A ): """simple 
docstring""" __UpperCamelCase : Dict[str, int] = {"total_count": 1, "logits": 1} __UpperCamelCase : type = NegativeBinomial @classmethod def _snake_case (cls , __lowercase , __lowercase ): __lowerCAmelCase = cls.squareplus(__lowercase ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def _snake_case (self , __lowercase ): __lowerCAmelCase , __lowerCAmelCase = distr_args if self.dim == 1: return self.distribution_class(total_count=__lowercase , logits=__lowercase ) else: return Independent(self.distribution_class(total_count=__lowercase , logits=__lowercase ) , 1 ) def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None ): __lowerCAmelCase , __lowerCAmelCase = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
9
0
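# The AffineTransformed moments above in closed form: for Y = scale * X + loc,
# E[Y] = scale * E[X] + loc and Var[Y] = scale**2 * Var[X]. A small check
# against torch.distributions (requires torch; the sample size and tolerance
# are arbitrary illustration choices).
import torch
from torch.distributions import AffineTransform, Normal, TransformedDistribution

base = Normal(torch.tensor(0.0), torch.tensor(2.0))   # mean 0, std 2
loc, scale = 3.0, 0.5
dist = TransformedDistribution(base, [AffineTransform(loc=loc, scale=scale)])
assert torch.isclose(base.mean * scale + loc, torch.tensor(3.0))
assert torch.isclose(base.variance * scale**2, torch.tensor(1.0))
assert torch.isclose(dist.sample((200_000,)).mean(), torch.tensor(3.0), atol=0.05)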
'''simple docstring''' import unittest from knapsack import greedy_knapsack as kp class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): __lowerCAmelCase = [10, 20, 30, 40, 50, 60] __lowerCAmelCase = [2, 4, 6, 8, 10, 12] __lowerCAmelCase = 1_00 self.assertEqual(kp.calc_profit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , 2_10 ) def _snake_case (self ): self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , '''max_weight must greater than zero.''' ) def _snake_case (self ): self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , '''Weight can not be negative.''' ) def _snake_case (self ): self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , '''Profit can not be negative.''' ) def _snake_case (self ): self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , '''max_weight must greater than zero.''' ) def _snake_case (self ): self.assertRaisesRegex( _SCREAMING_SNAKE_CASE , '''The length of profit and weight must be same.''' ) if __name__ == "__main__": unittest.main()
354
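# A plausible greedy fractional-knapsack sketch matching the first test case
# above: sort items by profit/weight ratio and fill until max_weight runs out.
# All six items fit in a capacity of 100, so the profit is the plain sum, 210.
# This is an illustrative stand-in, not the knapsack module itself.
def calc_profit(profit, weight, max_weight):
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in items:
        if w <= capacity:
            total, capacity = total + p, capacity - w
        else:
            total += p * capacity / w
            break
    return total

assert calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210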
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class a__ ( __A ): """simple docstring""" __UpperCamelCase : Tuple = 'naver-clova-ix/donut-base-finetuned-docvqa' __UpperCamelCase : List[str] = ( 'This is a tool that answers a question about a document (pdf). It takes an input named `document` which ' 'should be the document containing the information, as well as a `question` that is the question about the ' 'document. It returns a text that contains the answer to the question.' ) __UpperCamelCase : Optional[int] = 'document_qa' __UpperCamelCase : Optional[int] = AutoProcessor __UpperCamelCase : Tuple = VisionEncoderDecoderModel __UpperCamelCase : Any = ['image', 'text'] __UpperCamelCase : Optional[Any] = ['text'] def __init__(self , *__lowercase , **__lowercase ): if not is_vision_available(): raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' ) super().__init__(*__lowercase , **__lowercase ) def _snake_case (self , __lowercase , __lowercase ): __lowerCAmelCase = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>''' __lowerCAmelCase = task_prompt.replace('''{user_input}''' , __lowercase ) __lowerCAmelCase = self.pre_processor.tokenizer( __lowercase , add_special_tokens=__lowercase , return_tensors='''pt''' ).input_ids __lowerCAmelCase = self.pre_processor(__lowercase , return_tensors='''pt''' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def _snake_case (self , __lowercase ): return self.model.generate( inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__lowercase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__lowercase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__lowercase , ).sequences def _snake_case (self , __lowercase ): __lowerCAmelCase = self.pre_processor.batch_decode(__lowercase )[0] __lowerCAmelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' ) __lowerCAmelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' ) __lowerCAmelCase = re.sub(R'''<.*?>''' , '''''' , __lowercase , count=1 ).strip() # remove first task start token __lowerCAmelCase = self.pre_processor.tokenajson(__lowercase ) return sequence["answer"]
9
0
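# The Donut-style prompt templating the tool above performs before
# tokenization: the user question is spliced into a fixed task prompt.
task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
prompt = task_prompt.replace("{user_input}", "What is the invoice total?")
assert prompt == "<s_docvqa><s_question>What is the invoice total?</s_question><s_answer>"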
'''simple docstring''' import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever _UpperCAmelCase : Optional[Any] = logging.getLogger(__name__) class a__ ( snake_case_ ): """simple docstring""" def __init__(self , __lowercase , __lowercase , __lowercase , __lowercase=None ): super().__init__( __lowercase , question_encoder_tokenizer=__lowercase , generator_tokenizer=__lowercase , index=__lowercase , init_retrieval=__lowercase , ) __lowerCAmelCase = None def _snake_case (self , __lowercase ): logger.info('''initializing retrieval''' ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info('''dist initialized''' ) # needs to be set manually __lowerCAmelCase = self._infer_socket_ifname() # avoid clash with the NCCL port __lowerCAmelCase = str(distributed_port + 1 ) __lowerCAmelCase = dist.new_group(ranks=__lowercase , backend='''gloo''' ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info('''dist not initialized / main''' ) self.index.init_index() # all processes wait until the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def _snake_case (self ): return dist.get_rank(group=self.process_group ) == 0 def _snake_case (self , __lowercase , __lowercase , __lowercase=torch.floataa ): __lowerCAmelCase = torch.empty(__lowercase , dtype=__lowercase ) dist.scatter(__lowercase , src=0 , scatter_list=__lowercase , group=self.process_group ) return target_tensor def _snake_case (self ): __lowerCAmelCase = psutil.net_if_addrs() # a hacky way to deal with varying network interface names __lowerCAmelCase = next((addr for addr in addrs if addr.startswith('''e''' )) , __lowercase ) return ifname def _snake_case (self , __lowercase , __lowercase ): if not dist.is_initialized(): __lowerCAmelCase = self._main_retrieve(__lowercase , __lowercase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowercase ) # distributed training __lowerCAmelCase = dist.get_world_size(group=self.process_group ) # gather logic __lowerCAmelCase = None if self._is_main(): __lowerCAmelCase = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__lowercase )] dist.gather(torch.tensor(__lowercase ) , dst=0 , gather_list=__lowercase , group=self.process_group ) # scatter logic __lowerCAmelCase = question_hidden_states.shape[0] __lowerCAmelCase = [] __lowerCAmelCase = [] if self._is_main(): assert len(__lowercase ) == world_size __lowerCAmelCase = self._main_retrieve(torch.cat(__lowercase ).numpy() , __lowercase ) __lowerCAmelCase = torch.tensor(__lowercase ), torch.tensor(__lowercase ) __lowerCAmelCase = self._chunk_tensor(__lowercase , __lowercase ) __lowerCAmelCase = self._chunk_tensor(__lowercase , __lowercase ) __lowerCAmelCase = self._scattered(__lowercase , [n_queries, n_docs] , target_type=torch.intaa ) __lowerCAmelCase = self._scattered(__lowercase , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__lowercase )
355
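# The gather/scatter bookkeeping above in plain Python: the main worker
# collects every process's queries, retrieves once over the concatenation,
# then chunks the results back out, one slice per process. Illustrative only;
# the real class does this with torch.distributed tensors.
def chunk(items, n_chunks):
    size = len(items) // n_chunks
    return [items[i * size : (i + 1) * size] for i in range(n_chunks)]

gathered = [["q0", "q1"], ["q2", "q3"]]            # one query list per worker
flat = [q for worker in gathered for q in worker]
results = [f"docs-for-{q}" for q in flat]          # stand-in for retrieval
assert chunk(results, 2) == [["docs-for-q0", "docs-for-q1"],
                             ["docs-for-q2", "docs-for-q3"]]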
'''simple docstring''' def __magic_name__( lowerCamelCase): __lowerCAmelCase = 1 __lowerCAmelCase = 2 while i * i <= n: __lowerCAmelCase = 0 while n % i == 0: n //= i multiplicity += 1 n_divisors *= multiplicity + 1 i += 1 if n > 1: n_divisors *= 2 return n_divisors def __magic_name__( ): __lowerCAmelCase = 1 __lowerCAmelCase = 1 while True: i += 1 t_num += i if count_divisors(lowerCamelCase) > 5_0_0: break return t_num if __name__ == "__main__": print(solution())
9
0
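# The divisor-count logic above, restated: factor n by trial division and
# multiply (multiplicity + 1) across prime factors. 28 = 2^2 * 7 gives
# (2 + 1) * (1 + 1) = 6 divisors; 36 = 2^2 * 3^2 gives 3 * 3 = 9.
def count_divisors(n: int) -> int:
    n_divisors, i = 1, 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2   # one leftover prime factor of multiplicity 1
    return n_divisors

assert count_divisors(28) == 6
assert count_divisors(36) == 9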
'''simple docstring''' from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar _UpperCAmelCase : int = TypeVar("""T""") class a__ ( Generic[T] ): """simple docstring""" __UpperCamelCase : deque[T] # Cache store of keys __UpperCamelCase : set[T] # References of the keys in cache __UpperCamelCase : int = 10 # Maximum capacity of cache def __init__(self , __lowercase ): __lowerCAmelCase = deque() __lowerCAmelCase = set() if not n: __lowerCAmelCase = sys.maxsize elif n < 0: raise ValueError('''n should be an integer greater than 0.''' ) else: __lowerCAmelCase = n def _snake_case (self , __lowercase ): if x not in self.key_reference: if len(self.dq_store ) == LRUCache._MAX_CAPACITY: __lowerCAmelCase = self.dq_store.pop() self.key_reference.remove(lowerCamelCase__ ) else: self.dq_store.remove(lowerCamelCase__ ) self.dq_store.appendleft(lowerCamelCase__ ) self.key_reference.add(lowerCamelCase__ ) def _snake_case (self ): for k in self.dq_store: print(lowerCamelCase__ ) def __repr__(self ): return F"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}""" if __name__ == "__main__": import doctest doctest.testmod() _UpperCAmelCase : LRUCache[str | int] = LRUCache(4) lru_cache.refer("""A""") lru_cache.refer(2) lru_cache.refer(3) lru_cache.refer("""A""") lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() print(lru_cache) assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
356
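# The refer() policy above in miniature: a hit moves the key to the front of
# the deque, a miss at capacity evicts from the back before inserting.
from collections import deque

capacity = 2
store: deque = deque()
for key in ["A", "B", "A", "C"]:
    if key in store:
        store.remove(key)        # hit: pull the key out ...
    elif len(store) == capacity:
        store.pop()              # miss at capacity: evict the least recent
    store.appendleft(key)        # ... and place it at the front
assert list(store) == ["C", "A"]  # "B" was evicted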
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. __lowerCAmelCase = [[1, 2, 4], [1, 2, 3, 4]] __lowerCAmelCase = DisjunctiveConstraint(__lowercase ) self.assertTrue(isinstance(dc.token_ids , __lowercase ) ) with self.assertRaises(__lowercase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__lowercase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def _snake_case (self ): # We can't have constraints that are complete subsets of another. This leads to a perverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). __lowerCAmelCase = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__lowercase ): DisjunctiveConstraint(__lowercase ) # fails here def _snake_case (self ): __lowerCAmelCase = [[1, 2, 3], [1, 2, 4]] __lowerCAmelCase = DisjunctiveConstraint(__lowercase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 ) __lowerCAmelCase = stepped is True and completed is False and reset is False self.assertTrue(__lowercase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 ) __lowerCAmelCase = stepped is True and completed is False and reset is False self.assertTrue(__lowercase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(3 ) __lowerCAmelCase = stepped is True and completed is True and reset is False self.assertTrue(__lowercase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def _snake_case (self ): __lowerCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __lowerCAmelCase = DisjunctiveConstraint(__lowercase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 ) self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
9
0
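# The advance behavior the tests above check, as a simplified stand-in (not
# the real DisjunctiveConstraint): candidate sequences are filtered token by
# token, and the constraint completes once one candidate is fully matched.
candidates = [[1, 2, 3], [1, 2, 4]]
seq = []
for token in (1, 2, 4):
    seq.append(token)
    candidates = [c for c in candidates if c[: len(seq)] == seq]
assert any(c == seq for c in candidates)   # completed on [1, 2, 4]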
'''simple docstring''' def __magic_name__( lowerCamelCase, lowerCamelCase): if not (isinstance(A__, A__) and isinstance(A__, A__)): raise ValueError('''longest_common_substring() takes two strings for inputs''') __lowerCAmelCase = len(A__) __lowerCAmelCase = len(A__) __lowerCAmelCase = [[0] * (texta_length + 1) for _ in range(texta_length + 1)] __lowerCAmelCase = 0 __lowerCAmelCase = 0 for i in range(1, texta_length + 1): for j in range(1, texta_length + 1): if texta[i - 1] == texta[j - 1]: __lowerCAmelCase = 1 + dp[i - 1][j - 1] if dp[i][j] > ans_length: __lowerCAmelCase = i __lowerCAmelCase = dp[i][j] return texta[ans_index - ans_length : ans_index] if __name__ == "__main__": import doctest doctest.testmod()
357
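# A worked trace of the DP above: dp[i][j] holds the length of the common
# suffix of texta[:i] and textb[:j]; the maximal cell marks where the longest
# common substring ends in texta.
texta, textb = "abcdef", "xabded"
dp = [[0] * (len(textb) + 1) for _ in range(len(texta) + 1)]
ans_index = ans_length = 0
for i in range(1, len(texta) + 1):
    for j in range(1, len(textb) + 1):
        if texta[i - 1] == textb[j - 1]:
            dp[i][j] = 1 + dp[i - 1][j - 1]
            if dp[i][j] > ans_length:
                ans_index, ans_length = i, dp[i][j]
assert texta[ans_index - ans_length : ans_index] == "ab"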
'''simple docstring''' from typing import Dict, Optional import numpy as np import datasets _UpperCAmelCase : List[str] = """ IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation, the mean IoU of the image is calculated by taking the IoU of each class and averaging them. """ _UpperCAmelCase : str = """ Args: predictions (`List[ndarray]`): List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. references (`List[ndarray]`): List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. num_labels (`int`): Number of classes (categories). ignore_index (`int`): Index that will be ignored during evaluation. nan_to_num (`int`, *optional*): If specified, NaN values will be replaced by the number defined by the user. label_map (`dict`, *optional*): If specified, dictionary mapping old label indices to new label indices. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: `Dict[str, float | ndarray]` comprising various elements: - *mean_iou* (`float`): Mean Intersection-over-Union (IoU averaged over all categories). - *mean_accuracy* (`float`): Mean accuracy (averaged over all categories). - *overall_accuracy* (`float`): Overall accuracy on all images. - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`): Per category accuracy. - *per_category_iou* (`ndarray` of shape `(num_labels,)`): Per category IoU. Examples: >>> import numpy as np >>> mean_iou = datasets.load_metric(\"mean_iou\") >>> # suppose one has 3 different segmentation maps predicted >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]]) >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]]) >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]]) >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]]) >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]]) >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]]) >>> predicted = [predicted_1, predicted_2, predicted_3] >>> ground_truth = [actual_1, actual_2, actual_3] >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False) >>> print(results) # doctest: +NORMALIZE_WHITESPACE {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])} """ _UpperCAmelCase : Tuple = """\ @software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020, author = {{MMSegmentation Contributors}}, license = {Apache-2.0}, month = {7}, title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}}, url = {https://github.com/open-mmlab/mmsegmentation}, year = {2020} }""" def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False, ): if label_map is not None: for old_id, new_id in label_map.items(): __lowerCAmelCase = new_id # turn into Numpy arrays __lowerCAmelCase = np.array(lowerCamelCase) __lowerCAmelCase = np.array(lowerCamelCase) if reduce_labels: __lowerCAmelCase = 2_5_5 __lowerCAmelCase = label - 1 __lowerCAmelCase = 2_5_5 __lowerCAmelCase = label != ignore_index __lowerCAmelCase = np.not_equal(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = pred_label[mask] __lowerCAmelCase = np.array(lowerCamelCase)[mask] __lowerCAmelCase = pred_label[pred_label == label] __lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0] __lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0] __lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0] __lowerCAmelCase = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False, ): __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) for result, gt_seg_map in zip(lowerCamelCase, lowerCamelCase): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = intersect_and_union( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = False, ): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = total_intersect_and_union( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) # compute metrics __lowerCAmelCase = {} __lowerCAmelCase = total_area_intersect.sum() / total_area_label.sum() __lowerCAmelCase = total_area_intersect / total_area_union __lowerCAmelCase = total_area_intersect / total_area_label __lowerCAmelCase = np.nanmean(lowerCamelCase) __lowerCAmelCase = np.nanmean(lowerCamelCase) __lowerCAmelCase = all_acc __lowerCAmelCase = iou __lowerCAmelCase = acc if nan_to_num is not None: __lowerCAmelCase = {metric: np.nan_to_num(lowerCamelCase, nan=lowerCamelCase) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): """simple docstring""" def _snake_case (self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - 
width dim { '''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ), '''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ), } ) , reference_urls=[ '''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py''' ] , ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = False , ): __lowerCAmelCase = mean_iou( results=__lowercase , gt_seg_maps=__lowercase , num_labels=__lowercase , ignore_index=__lowercase , nan_to_num=__lowercase , label_map=__lowercase , reduce_labels=__lowercase , ) return iou_result
'''simple docstring''' import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def __magic_name__( lowerCamelCase): __lowerCAmelCase = np.inf def set_batch_size(lowerCamelCase) -> None: nonlocal batch_size if isinstance(_UpperCAmelCase, _UpperCAmelCase): __lowerCAmelCase = min(_UpperCAmelCase, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS) elif isinstance(_UpperCAmelCase, _UpperCAmelCase): __lowerCAmelCase = min(_UpperCAmelCase, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS) elif isinstance(_UpperCAmelCase, _UpperCAmelCase) and feature.dtype == "binary": __lowerCAmelCase = min(_UpperCAmelCase, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS) _visit(_UpperCAmelCase, _UpperCAmelCase) return None if batch_size is np.inf else batch_size class a__ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__(self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = False , __lowercase = False , __lowercase = None , **__lowercase , ): super().__init__( __lowercase , split=__lowercase , features=__lowercase , cache_dir=__lowercase , keep_in_memory=__lowercase , streaming=__lowercase , num_proc=__lowercase , **__lowercase , ) __lowerCAmelCase = path_or_paths if isinstance(__lowercase , __lowercase ) else {self.split: path_or_paths} __lowerCAmelCase = _PACKAGED_DATASETS_MODULES['parquet'][1] __lowerCAmelCase = Parquet( cache_dir=__lowercase , data_files=__lowercase , features=__lowercase , hash=__lowercase , **__lowercase , ) def _snake_case (self ): # Build iterable dataset if self.streaming: __lowerCAmelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: __lowerCAmelCase = None __lowerCAmelCase = None __lowerCAmelCase = None __lowerCAmelCase = None self.builder.download_and_prepare( download_config=__lowercase , download_mode=__lowercase , verification_mode=__lowercase , base_path=__lowercase , num_proc=self.num_proc , ) __lowerCAmelCase = self.builder.as_dataset( split=self.split , verification_mode=__lowercase , in_memory=self.keep_in_memory ) return dataset class a__ : """simple docstring""" def __init__(self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ): __lowerCAmelCase = dataset __lowerCAmelCase = path_or_buf __lowerCAmelCase = batch_size or get_writer_batch_size(dataset.features ) __lowerCAmelCase = parquet_writer_kwargs def _snake_case (self ): __lowerCAmelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , '''wb+''' ) as buffer: __lowerCAmelCase = self._write(file_obj=__lowercase , batch_size=__lowercase , **self.parquet_writer_kwargs ) else: __lowerCAmelCase = self._write(file_obj=self.path_or_buf , batch_size=__lowercase , **self.parquet_writer_kwargs ) return written def _snake_case (self , __lowercase , __lowercase , **__lowercase ): __lowerCAmelCase = 0 __lowerCAmelCase = parquet_writer_kwargs.pop('''path_or_buf''' , __lowercase ) __lowerCAmelCase = self.dataset.features.arrow_schema __lowerCAmelCase = 
pq.ParquetWriter(__lowercase , schema=__lowercase , **__lowercase ) for offset in logging.tqdm( range(0 , len(self.dataset ) , __lowercase ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ): __lowerCAmelCase = query_table( table=self.dataset._data , key=slice(__lowercase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(__lowercase ) written += batch.nbytes writer.close() return written
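# --- Usage sketch (added for illustration; not part of the original file) ---
# The reader/writer classes above back the public `Dataset.from_parquet` and
# `Dataset.to_parquet` helpers in `datasets`, so a round trip can be
# exercised like this (the file name is just an example):
def _demo_parquet_round_trip(path="demo.parquet"):
    from datasets import Dataset

    ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
    ds.to_parquet(path)  # goes through the writer path above
    restored = Dataset.from_parquet(path)  # goes through the reader path above
    assert restored["label"] == [0, 1]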
'''simple docstring''' import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : str = DebertaTokenizer __UpperCamelCase : str = True __UpperCamelCase : Any = DebertaTokenizerFast def _snake_case (self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __lowerCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) __lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __lowerCAmelCase = {'''unk_token''': '''[UNK]'''} __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowercase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowercase ) ) def _snake_case (self , **__lowercase ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , __lowercase ): __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = '''lower newer''' return input_text, output_text def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __lowerCAmelCase = tokenizer.tokenize(__lowercase ) self.assertListEqual(__lowercase , __lowercase ) __lowerCAmelCase = tokens + [tokenizer.unk_token] __lowerCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = tokenizer('''Hello''' , '''World''' ) __lowerCAmelCase = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , __lowercase ) @slow def _snake_case (self ): __lowerCAmelCase = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) __lowerCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowercase ) __lowerCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowercase ) __lowerCAmelCase = tokenizer.encode( '''sequence builders''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase ) __lowerCAmelCase = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def _snake_case (self ): __lowerCAmelCase = [self.tokenizer_class] if self.test_rust_tokenizer: 
tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: __lowerCAmelCase = tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) __lowerCAmelCase = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] __lowerCAmelCase = tokenizer(__lowercase , padding=__lowercase ) __lowerCAmelCase = [tokenizer.decode(__lowercase , skip_special_tokens=__lowercase ) for seq in encoding['''input_ids''']] # fmt: off __lowerCAmelCase = { '''input_ids''': [ [1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on __lowerCAmelCase = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , __lowercase ) for expected, decoded in zip(__lowercase , __lowercase ): self.assertEqual(__lowercase , __lowercase )
'''simple docstring''' from math import sqrt def __magic_name__( lowerCamelCase): assert isinstance(a__, a__) and ( number >= 0 ), "'number' must been an int and positive" __lowerCAmelCase = True # 0 and 1 are none primes. if number <= 1: __lowerCAmelCase = False for divisor in range(2, int(round(sqrt(a__))) + 1): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: __lowerCAmelCase = False break # precondition assert isinstance(a__, a__), "'status' must been from type bool" return status def __magic_name__( lowerCamelCase): assert isinstance(a__, a__) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N __lowerCAmelCase = list(range(2, n + 1)) __lowerCAmelCase = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(a__)): for j in range(i + 1, len(a__)): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): __lowerCAmelCase = 0 # filters actual prime numbers. __lowerCAmelCase = [x for x in begin_list if x != 0] # precondition assert isinstance(a__, a__), "'ans' must been from type list" return ans def __magic_name__( lowerCamelCase): assert isinstance(a__, a__) and (n > 2), "'N' must been an int and > 2" __lowerCAmelCase = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2, n + 1): if is_prime(a__): ans.append(a__) # precondition assert isinstance(a__, a__), "'ans' must been from type list" return ans def __magic_name__( lowerCamelCase): assert isinstance(a__, a__) and number >= 0, "'number' must been an int and >= 0" __lowerCAmelCase = [] # this list will be returns of the function. # potential prime number factors. __lowerCAmelCase = 2 __lowerCAmelCase = number if number == 0 or number == 1: ans.append(a__) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(a__): while quotient != 1: if is_prime(a__) and (quotient % factor == 0): ans.append(a__) quotient /= factor else: factor += 1 else: ans.append(a__) # precondition assert isinstance(a__, a__), "'ans' must been from type list" return ans def __magic_name__( lowerCamelCase): assert isinstance(a__, a__) and ( number >= 0 ), "'number' bust been an int and >= 0" __lowerCAmelCase = 0 # prime factorization of 'number' __lowerCAmelCase = prime_factorization(a__) __lowerCAmelCase = max(a__) # precondition assert isinstance(a__, a__), "'ans' must been from type int" return ans def __magic_name__( lowerCamelCase): assert isinstance(a__, a__) and ( number >= 0 ), "'number' bust been an int and >= 0" __lowerCAmelCase = 0 # prime factorization of 'number' __lowerCAmelCase = prime_factorization(a__) __lowerCAmelCase = min(a__) # precondition assert isinstance(a__, a__), "'ans' must been from type int" return ans def __magic_name__( lowerCamelCase): assert isinstance(a__, a__), "'number' must been an int" assert isinstance(number % 2 == 0, a__), "compare bust been from type bool" return number % 2 == 0 def __magic_name__( lowerCamelCase): assert isinstance(a__, a__), "'number' must been an int" assert isinstance(number % 2 != 0, a__), "compare bust been from type bool" return number % 2 != 0 def __magic_name__( lowerCamelCase): assert ( isinstance(a__, a__) and (number > 2) and is_even(a__) ), "'number' must been an int, even and > 2" __lowerCAmelCase = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' __lowerCAmelCase = get_prime_numbers(a__) 
__lowerCAmelCase = len(a__) # run variable for while-loops. __lowerCAmelCase = 0 __lowerCAmelCase = None # exit variable. for break up the loops __lowerCAmelCase = True while i < len_pn and loop: __lowerCAmelCase = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: __lowerCAmelCase = False ans.append(prime_numbers[i]) ans.append(prime_numbers[j]) j += 1 i += 1 # precondition assert ( isinstance(a__, a__) and (len(a__) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0]) and is_prime(ans[1]) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( isinstance(a__, a__) and isinstance(a__, a__) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." __lowerCAmelCase = 0 while numbera != 0: __lowerCAmelCase = numbera % numbera __lowerCAmelCase = numbera __lowerCAmelCase = rest # precondition assert isinstance(a__, a__) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( isinstance(a__, a__) and isinstance(a__, a__) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." __lowerCAmelCase = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' __lowerCAmelCase = prime_factorization(a__) __lowerCAmelCase = prime_factorization(a__) elif numbera == 1 or numbera == 1: __lowerCAmelCase = [] __lowerCAmelCase = [] __lowerCAmelCase = max(a__, a__) __lowerCAmelCase = 0 __lowerCAmelCase = 0 __lowerCAmelCase = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: __lowerCAmelCase = prime_fac_a.count(a__) __lowerCAmelCase = prime_fac_a.count(a__) for _ in range(max(a__, a__)): ans *= n else: __lowerCAmelCase = prime_fac_a.count(a__) for _ in range(a__): ans *= n done.append(a__) # iterates through primeFac2 for n in prime_fac_a: if n not in done: __lowerCAmelCase = prime_fac_a.count(a__) for _ in range(a__): ans *= n done.append(a__) # precondition assert isinstance(a__, a__) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def __magic_name__( lowerCamelCase): assert isinstance(a__, a__) and (n >= 0), "'number' must been a positive int" __lowerCAmelCase = 0 __lowerCAmelCase = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(a__): ans += 1 # precondition assert isinstance(a__, a__) and is_prime( a__), "'ans' must been a prime number and from type int" return ans def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( is_prime(a__) and is_prime(a__) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" __lowerCAmelCase = p_number_a + 1 # jump to the next number __lowerCAmelCase = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(a__): number += 1 while number < p_number_a: ans.append(a__) number += 1 # fetch the next prime number. 
while not is_prime(a__): number += 1 # precondition assert ( isinstance(a__, a__) and ans[0] != p_number_a and ans[len(a__) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def __magic_name__( lowerCamelCase): assert isinstance(a__, a__) and (n >= 1), "'n' must been int and >= 1" __lowerCAmelCase = [] # will be returned. for divisor in range(1, n + 1): if n % divisor == 0: ans.append(a__) # precondition assert ans[0] == 1 and ans[len(a__) - 1] == n, "Error in function getDivisiors(...)" return ans def __magic_name__( lowerCamelCase): assert isinstance(a__, a__) and ( number > 1 ), "'number' must been an int and >= 1" __lowerCAmelCase = get_divisors(a__) # precondition assert ( isinstance(a__, a__) and (divisors[0] == 1) and (divisors[len(a__) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1]) == number def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( isinstance(a__, a__) and isinstance(a__, a__) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. __lowerCAmelCase = gcd(abs(a__), abs(a__)) # precondition assert ( isinstance(a__, a__) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def __magic_name__( lowerCamelCase): assert isinstance(a__, a__) and (n >= 0), "'n' must been a int and >= 0" __lowerCAmelCase = 1 # this will be return. for factor in range(1, n + 1): ans *= factor return ans def __magic_name__( lowerCamelCase): assert isinstance(a__, a__) and (n >= 0), "'n' must been an int and >= 0" __lowerCAmelCase = 0 __lowerCAmelCase = 1 __lowerCAmelCase = 1 # this will be return for _ in range(n - 1): __lowerCAmelCase = ans ans += fiba __lowerCAmelCase = tmp return ans
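# --- Illustration (added; not part of the original file) ---
# The Goldbach helper above scans all primes below `number` with two nested
# loops; the same decomposition can be found with a single pass, reusing the
# trial-division primality idea the file is built on:
def _demo_goldbach(number=28):
    def _is_prime(n):
        return n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))

    for p in range(2, number):
        if _is_prime(p) and _is_prime(number - p):
            return (p, number - p)  # e.g. 28 -> (5, 23)
    raise ValueError("no decomposition found for this even number")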
'''simple docstring'''
import argparse
import datetime


def zeller(date_input: str) -> str:
    # Days of the week for response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
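# Example (added for illustration):
#     >>> zeller("01-31-2010")
#     'Your date 01-31-2010, is a Sunday!'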
'''simple docstring''' import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class a__ : """simple docstring""" def __init__(self , __lowercase , __lowercase=13 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=99 , __lowercase=32 , __lowercase=5 , __lowercase=4 , __lowercase=37 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_12 , __lowercase=16 , __lowercase=2 , __lowercase=0.0_2 , __lowercase=3 , __lowercase=4 , __lowercase=None , ): __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = seq_length __lowerCAmelCase = is_training __lowerCAmelCase = use_input_mask __lowerCAmelCase = use_token_type_ids __lowerCAmelCase = use_labels __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = type_vocab_size __lowerCAmelCase = type_sequence_label_size __lowerCAmelCase = initializer_range __lowerCAmelCase = num_labels __lowerCAmelCase = num_choices __lowerCAmelCase = scope def _snake_case (self ): __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase = None if self.use_input_mask: __lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase = None if self.use_token_type_ids: __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCAmelCase = None __lowerCAmelCase = None __lowerCAmelCase = None if self.use_labels: __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _snake_case (self ): return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ): __lowerCAmelCase = NystromformerModel(config=UpperCAmelCase__ ) 
model.to(UpperCAmelCase__ ) model.eval() __lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) __lowerCAmelCase = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) __lowerCAmelCase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ): __lowerCAmelCase = NystromformerForMaskedLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ): __lowerCAmelCase = NystromformerForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowerCAmelCase = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ): __lowerCAmelCase = self.num_labels __lowerCAmelCase = NystromformerForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ): __lowerCAmelCase = self.num_labels __lowerCAmelCase = NystromformerForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ): __lowerCAmelCase = self.num_choices __lowerCAmelCase = NystromformerForMultipleChoice(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCAmelCase = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case (self ): __lowerCAmelCase = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = config_and_inputs __lowerCAmelCase = 
{'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class a__ ( __lowercase , __lowercase , unittest.TestCase ): """simple docstring""" __UpperCamelCase : int = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) __UpperCamelCase : Dict = ( { '''feature-extraction''': NystromformerModel, '''fill-mask''': NystromformerForMaskedLM, '''question-answering''': NystromformerForQuestionAnswering, '''text-classification''': NystromformerForSequenceClassification, '''token-classification''': NystromformerForTokenClassification, '''zero-shot''': NystromformerForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase : List[Any] = False __UpperCamelCase : Optional[Any] = False def _snake_case (self ): __lowerCAmelCase = NystromformerModelTester(self ) __lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 ) def _snake_case (self ): self.config_tester.run_common_tests() def _snake_case (self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def _snake_case (self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowerCAmelCase = type self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def _snake_case (self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def _snake_case (self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ ) def _snake_case (self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ ) def _snake_case (self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ ) def _snake_case (self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ ) @slow def _snake_case (self ): for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase = NystromformerModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @require_torch class a__ ( unittest.TestCase ): """simple docstring""" @slow def _snake_case (self ): __lowerCAmelCase = NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' ) __lowerCAmelCase = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): __lowerCAmelCase = model(UpperCAmelCase__ )[0] __lowerCAmelCase = torch.Size((1, 6, 7_68) ) self.assertEqual(output.shape , UpperCAmelCase__ ) __lowerCAmelCase = torch.tensor( [[[-0.4_5_3_2, -0.0_9_3_6, 0.5_1_3_7], [-0.2_6_7_6, 0.0_6_2_8, 0.6_1_8_6], [-0.3_6_2_9, -0.1_7_2_6, 0.4_7_1_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4 ) ) @slow def _snake_case (self ): __lowerCAmelCase = '''the [MASK] of Belgium is Brussels''' __lowerCAmelCase = AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' ) __lowerCAmelCase = NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' ) 
__lowerCAmelCase = tokenizer(UpperCAmelCase__ , return_tensors='''pt''' ) with torch.no_grad(): __lowerCAmelCase = model(encoding.input_ids ).logits __lowerCAmelCase = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(UpperCAmelCase__ ) , '''capital''' )
'''simple docstring''' import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Optional[Any] = ConsistencyModelPipeline __UpperCamelCase : Optional[int] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS __UpperCamelCase : int = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt __UpperCamelCase : List[Any] = frozenset( [ 'num_inference_steps', 'generator', 'latents', 'output_type', 'return_dict', 'callback', 'callback_steps', ] ) @property def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' , subfolder='''test_unet''' , ) return unet @property def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , ) return unet def _snake_case (self , __lowercase=False ): if class_cond: __lowerCAmelCase = self.dummy_cond_unet else: __lowerCAmelCase = self.dummy_uncond_unet # Default to CM multistep sampler __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, } return components def _snake_case (self , __lowercase , __lowercase=0 ): if str(__lowercase ).startswith('''mps''' ): __lowerCAmelCase = torch.manual_seed(__lowercase ) else: __lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase ) __lowerCAmelCase = { '''batch_size''': 1, '''num_inference_steps''': None, '''timesteps''': [22, 0], '''generator''': generator, '''output_type''': '''np''', } return inputs def _snake_case (self ): __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase ) __lowerCAmelCase = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_dummy_inputs(__lowercase ) __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _snake_case (self ): __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components(class_cond=__lowercase ) __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase ) __lowerCAmelCase = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_dummy_inputs(__lowercase ) __lowerCAmelCase = 0 __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 
0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _snake_case (self ): __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase ) __lowerCAmelCase = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_dummy_inputs(__lowercase ) __lowerCAmelCase = 1 __lowerCAmelCase = None __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _snake_case (self ): __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components(class_cond=__lowercase ) __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase ) __lowerCAmelCase = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_dummy_inputs(__lowercase ) __lowerCAmelCase = 1 __lowerCAmelCase = None __lowerCAmelCase = 0 __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case (self , __lowercase=0 , __lowercase=False , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=(1, 3, 64, 64) ): __lowerCAmelCase = torch.manual_seed(__lowercase ) __lowerCAmelCase = { '''num_inference_steps''': None, '''timesteps''': [22, 0], '''class_labels''': 0, '''generator''': generator, '''output_type''': '''np''', } if get_fixed_latents: __lowerCAmelCase = self.get_fixed_latents(seed=__lowercase , device=__lowercase , dtype=__lowercase , shape=__lowercase ) __lowerCAmelCase = latents return inputs def _snake_case (self , __lowercase=0 , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=(1, 3, 64, 64) ): if type(__lowercase ) == str: __lowerCAmelCase = torch.device(__lowercase ) __lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase ) __lowerCAmelCase = randn_tensor(__lowercase , generator=__lowercase , device=__lowercase , dtype=__lowercase ) return latents def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase ) pipe.to(torch_device=__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_inputs() __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _snake_case (self ): 
__lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase ) pipe.to(torch_device=__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_inputs() __lowerCAmelCase = 1 __lowerCAmelCase = None __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 @require_torch_a def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase ) pipe.to(torch_device=__lowercase , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_inputs(get_fixed_latents=__lowercase , device=__lowercase ) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=__lowercase , enable_math=__lowercase , enable_mem_efficient=__lowercase ): __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @require_torch_a def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase ) pipe.to(torch_device=__lowercase , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_inputs(get_fixed_latents=__lowercase , device=__lowercase ) __lowerCAmelCase = 1 __lowerCAmelCase = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=__lowercase , enable_math=__lowercase , enable_mem_efficient=__lowercase ): __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # frozen dataclass: write through __dict__ instead of attribute assignment
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
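# --- Usage sketch (added for illustration; not part of the original file) ---
# A dataset whose columns use other names can be aligned through this
# template; `ds` below is a hypothetical `datasets.Dataset` with an Audio
# column "file" and a string column "text".
def _demo_asr_template(ds):
    task = AutomaticSpeechRecognition(audio_column="file", transcription_column="text")
    return ds.prepare_for_task(task)  # casts/renames to "audio"/"transcription"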
'''simple docstring'''
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(input_a, input_b):
    """Euclidean distance between two points."""
    return np.linalg.norm(np.array(input_a) - np.array(input_b))


def classifier(train_data, train_target, class_labels, point, k=5):
    """Classify `point` by majority vote among its k nearest training points."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return class_labels[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
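# --- Usage sketch (added for illustration; not part of the original file) ---
def _demo_mra_config():
    from transformers import MraConfig, MraModel

    configuration = MraConfig()  # uw-madison/mra-base-512-4 style defaults
    model = MraModel(configuration)  # randomly initialized, not pretrained
    return model.config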
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): __lowerCAmelCase = tempfile.mkdtemp() # fmt: off __lowerCAmelCase = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) __lowerCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowerCAmelCase = {'''unk_token''': '''<unk>'''} __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowercase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowercase ) ) __lowerCAmelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } __lowerCAmelCase = os.path.join(self.tmpdirname , __lowercase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowercase , __lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **__lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **__lowercase ) def _snake_case (self , **__lowercase ): return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self ): shutil.rmtree(self.tmpdirname ) def _snake_case (self ): __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCAmelCase = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase ) __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) 
self.assertIsInstance(processor_slow.tokenizer , __lowercase ) self.assertIsInstance(processor_fast.tokenizer , __lowercase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowercase ) self.assertIsInstance(processor_fast.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCAmelCase = self.get_image_processor(do_normalize=__lowercase ) __lowerCAmelCase = OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowercase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = image_processor(__lowercase , return_tensors='''np''' ) __lowerCAmelCase = processor(images=__lowercase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = processor(text=__lowercase , return_tensors='''np''' ) __lowerCAmelCase = tokenizer(__lowercase , return_tensors='''np''' ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(text=__lowercase , images=__lowercase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = '''google/owlvit-base-patch32''' __lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase ) __lowerCAmelCase = ['''cat''', '''nasa badge'''] __lowerCAmelCase = processor(text=__lowercase ) __lowerCAmelCase = 16 self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = '''google/owlvit-base-patch32''' __lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase ) __lowerCAmelCase = [['''cat''', '''nasa badge'''], 
['''person''']] __lowerCAmelCase = processor(text=__lowercase ) __lowerCAmelCase = 16 __lowerCAmelCase = len(__lowercase ) __lowerCAmelCase = max([len(__lowercase ) for texts in input_texts] ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = '''google/owlvit-base-patch32''' __lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase ) __lowerCAmelCase = ['''cat''', '''nasa badge'''] __lowerCAmelCase = processor(text=__lowercase ) __lowerCAmelCase = 16 __lowerCAmelCase = inputs['''input_ids'''] __lowerCAmelCase = [ [4_94_06, 23_68, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_94_06, 68_41, 1_13_01, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) ) self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] ) self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(images=__lowercase , query_images=__lowercase ) self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCAmelCase = processor.batch_decode(__lowercase ) __lowerCAmelCase = tokenizer.batch_decode(__lowercase ) self.assertListEqual(__lowercase , __lowercase )
'''simple docstring''' import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase : Any = logging.get_logger() @dataclass class a__ : """simple docstring""" __UpperCamelCase : Optional[Any] = 42 __UpperCamelCase : str = field(default_factory=__SCREAMING_SNAKE_CASE ) __UpperCamelCase : str = field(default_factory=__SCREAMING_SNAKE_CASE ) def _snake_case (self , __lowercase , __lowercase , __lowercase ): __lowerCAmelCase = len(list(m.modules() ) ) == 1 or isinstance(_SCREAMING_SNAKE_CASE , nn.Convad ) or isinstance(_SCREAMING_SNAKE_CASE , nn.BatchNormad ) if has_not_submodules: self.traced.append(_SCREAMING_SNAKE_CASE ) def __call__(self , __lowercase ): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(_SCREAMING_SNAKE_CASE ) [x.remove() for x in self.handles] return self @property def _snake_case (self ): return list(filter(lambda __lowercase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class a__ : """simple docstring""" __UpperCamelCase : List[Any] = 42 __UpperCamelCase : Tuple = 42 __UpperCamelCase : Optional[Any] = 0 __UpperCamelCase : int = field(default_factory=__SCREAMING_SNAKE_CASE ) __UpperCamelCase : List[Any] = field(default_factory=__SCREAMING_SNAKE_CASE ) def __call__(self , __lowercase ): __lowerCAmelCase = Tracker(self.dest )(_SCREAMING_SNAKE_CASE ).parametrized __lowerCAmelCase = Tracker(self.src )(_SCREAMING_SNAKE_CASE ).parametrized __lowerCAmelCase = list(filter(lambda __lowercase : type(_SCREAMING_SNAKE_CASE ) not in self.src_skip , _SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase = list(filter(lambda __lowercase : type(_SCREAMING_SNAKE_CASE ) not in self.dest_skip , _SCREAMING_SNAKE_CASE ) ) if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ): raise Exception( F"""Numbers of operations are different. Source module has {len(_SCREAMING_SNAKE_CASE )} operations while""" F""" destination module has {len(_SCREAMING_SNAKE_CASE )}.""" ) for dest_m, src_m in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F"""Transfered from={src_m} to={dest_m}""" ) def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = True): print(F"""Converting {name}...""") with torch.no_grad(): __lowerCAmelCase = timm.create_model(__snake_case, pretrained=__snake_case).eval() __lowerCAmelCase = ResNetForImageClassification(__snake_case).eval() __lowerCAmelCase = ModuleTransfer(src=__snake_case, dest=__snake_case) __lowerCAmelCase = torch.randn((1, 3, 2_2_4, 2_2_4)) module_transfer(__snake_case) assert torch.allclose(from_model(__snake_case), our_model(__snake_case).logits), "The model logits don't match the original one." 
__lowerCAmelCase = F"""resnet{"-".join(name.split("resnet"))}""" print(__snake_case) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name, commit_message='''Add model''', use_temp_dir=__snake_case, ) # we can use the convnext one __lowerCAmelCase = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''') image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name, commit_message='''Add image processor''', use_temp_dir=__snake_case, ) print(F"""Pushed {checkpoint_name}""") def __magic_name__( lowerCamelCase, lowerCamelCase = None, lowerCamelCase = True): __lowerCAmelCase = "imagenet-1k-id2label.json" __lowerCAmelCase = 1_0_0_0 __lowerCAmelCase = (1, num_labels) __lowerCAmelCase = "huggingface/label-files" __lowerCAmelCase = num_labels __lowerCAmelCase = json.load(open(hf_hub_download(__snake_case, __snake_case, repo_type='''dataset'''), '''r''')) __lowerCAmelCase = {int(__snake_case): v for k, v in idalabel.items()} __lowerCAmelCase = idalabel __lowerCAmelCase = {v: k for k, v in idalabel.items()} __lowerCAmelCase = partial(__snake_case, num_labels=__snake_case, idalabel=__snake_case, labelaid=__snake_case) __lowerCAmelCase = { "resnet18": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2], hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2], layer_type='''basic'''), "resnet26": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2], hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8], layer_type='''bottleneck'''), "resnet34": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3], hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2], layer_type='''basic'''), "resnet50": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3], hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8], layer_type='''bottleneck'''), "resnet101": ImageNetPreTrainedConfig( depths=[3, 4, 2_3, 3], hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8], layer_type='''bottleneck'''), "resnet152": ImageNetPreTrainedConfig( depths=[3, 8, 3_6, 3], hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8], layer_type='''bottleneck'''), } if model_name: convert_weight_and_push(__snake_case, names_to_config[model_name], __snake_case, __snake_case) else: for model_name, config in names_to_config.items(): convert_weight_and_push(__snake_case, __snake_case, __snake_case, __snake_case) return config, expected_shape if __name__ == "__main__": _UpperCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported resnet* architecture,""" """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) _UpperCAmelCase : int = parser.parse_args() _UpperCAmelCase : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
363
'''simple docstring'''
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    # brute force: try every 3-permutation of the array
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    # sort once, then scan with two pointers for each fixed first element
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
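A quick usage sketch for the two strategies restored above; the array and target are illustrative values, not part of the source file. Since triplet_sum2 sorts its argument in place, a copy is passed each time.

# illustrative check (hypothetical inputs): both strategies should agree
arr = [2, 7, 11, 15, -1]
print(triplet_sum1(list(arr), 20))  # (2, 7, 11), found by brute force over permutations
print(triplet_sum2(list(arr), 20))  # (2, 7, 11), via the sorted two-pointer scan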
9
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)

_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
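As a hedged aside on the lazy-import pattern above: _LazyModule swaps itself into sys.modules, so the heavy submodules are only imported on first attribute access. A minimal sketch, assuming a recent transformers release is installed:

import transformers  # cheap: the speecht5 submodules are not loaded yet
processor_cls = transformers.SpeechT5Processor  # the real import happens here, on first access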
364
'''simple docstring'''
import numpy as np


def power_iteration(input_matrix, vector, error_tol=1e-12, max_iterations=100):
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or Hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
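A small usage sketch for power_iteration; the 2x2 matrix below is an illustrative choice, not from the source file.

import numpy as np

A = np.array([[2.0, 1.0], [1.0, 3.0]])  # symmetric, so the Hermitian assumption holds
v0 = np.array([1.0, 0.0])
eigenvalue, eigenvector = power_iteration(A, v0)
print(eigenvalue)  # ~3.618, i.e. (5 + sqrt(5)) / 2, the dominant eigenvalue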
9
0
'''simple docstring'''
def rank_of_matrix(matrix):
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank


if __name__ == "__main__":
    import doctest

    doctest.testmod()
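A hedged usage sketch for rank_of_matrix; the matrix here is an illustrative example (and is mutated in place by the elimination).

matrix = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]
print(rank_of_matrix(matrix))  # 2: the third row is a linear combination of the first two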
365
'''simple docstring''' from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract _UpperCAmelCase : str = logging.get_logger(__name__) def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): return [ int(1_0_0_0 * (box[0] / width)), int(1_0_0_0 * (box[1] / height)), int(1_0_0_0 * (box[2] / width)), int(1_0_0_0 * (box[3] / height)), ] def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase = None): __lowerCAmelCase = tesseract_config if tesseract_config is not None else '''''' # apply OCR __lowerCAmelCase = to_pil_image(lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = pil_image.size __lowerCAmelCase = pytesseract.image_to_data(lowerCamelCase, lang=lowerCamelCase, output_type='''dict''', config=lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height'''] # filter empty words and corresponding coordinates __lowerCAmelCase = [idx for idx, word in enumerate(lowerCamelCase) if not word.strip()] __lowerCAmelCase = [word for idx, word in enumerate(lowerCamelCase) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format __lowerCAmelCase = [] for x, y, w, h in zip(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = [x, y, x + w, y + h] actual_boxes.append(lowerCamelCase) # finally, normalize the bounding boxes __lowerCAmelCase = [] for box in actual_boxes: normalized_boxes.append(normalize_box(lowerCamelCase, lowerCamelCase, lowerCamelCase)) assert len(lowerCamelCase) == len(lowerCamelCase), "Not as many words as there are bounding boxes" return words, normalized_boxes class a__ ( __A ): """simple docstring""" __UpperCamelCase : str = ['pixel_values'] def __init__(self , __lowercase = True , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = None , __lowercase = "" , **__lowercase , ): super().__init__(**__lowercase ) __lowerCAmelCase = size if size is not None else {'''height''': 2_24, '''width''': 2_24} __lowerCAmelCase = get_size_dict(__lowercase ) __lowerCAmelCase = do_resize __lowerCAmelCase = size __lowerCAmelCase = resample __lowerCAmelCase = apply_ocr __lowerCAmelCase = ocr_lang __lowerCAmelCase = tesseract_config def _snake_case (self , __lowercase , __lowercase , __lowercase = PILImageResampling.BILINEAR , __lowercase = None , **__lowercase , ): __lowerCAmelCase = get_size_dict(__lowercase ) if "height" not in size or "width" not in size: raise ValueError(F"""The size 
dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" ) __lowerCAmelCase = (size['''height'''], size['''width''']) return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase ) def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ): __lowerCAmelCase = do_resize if do_resize is not None else self.do_resize __lowerCAmelCase = size if size is not None else self.size __lowerCAmelCase = get_size_dict(__lowercase ) __lowerCAmelCase = resample if resample is not None else self.resample __lowerCAmelCase = apply_ocr if apply_ocr is not None else self.apply_ocr __lowerCAmelCase = ocr_lang if ocr_lang is not None else self.ocr_lang __lowerCAmelCase = tesseract_config if tesseract_config is not None else self.tesseract_config __lowerCAmelCase = make_list_of_images(__lowercase ) if not valid_images(__lowercase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) # All transformations expect numpy arrays. __lowerCAmelCase = [to_numpy_array(__lowercase ) for image in images] if apply_ocr: requires_backends(self , '''pytesseract''' ) __lowerCAmelCase = [] __lowerCAmelCase = [] for image in images: __lowerCAmelCase , __lowerCAmelCase = apply_tesseract(__lowercase , __lowercase , __lowercase ) words_batch.append(__lowercase ) boxes_batch.append(__lowercase ) if do_resize: __lowerCAmelCase = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images] # flip color channels from RGB to BGR (as Detectron2 requires this) __lowerCAmelCase = [flip_channel_order(__lowercase ) for image in images] __lowerCAmelCase = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images] __lowerCAmelCase = BatchFeature(data={'''pixel_values''': images} , tensor_type=__lowercase ) if apply_ocr: __lowerCAmelCase = words_batch __lowerCAmelCase = boxes_batch return data
9
0
import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging _UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) def __magic_name__( ): __lowerCAmelCase = os.getenv('''SM_HP_MP_PARAMETERS''', '''{}''') try: # Parse it and check the field "partitions" is included, it is required for model parallel. __lowerCAmelCase = json.loads(SCREAMING_SNAKE_CASE__) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. __lowerCAmelCase = os.getenv('''SM_FRAMEWORK_PARAMS''', '''{}''') try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". __lowerCAmelCase = json.loads(SCREAMING_SNAKE_CASE__) if not mpi_options.get('''sagemaker_mpi_enabled''', SCREAMING_SNAKE_CASE__): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec('''smdistributed''') is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class a__ ( lowerCamelCase_ ): """simple docstring""" __UpperCamelCase : str = field( default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , ) def _snake_case (self ): super().__post_init__() warnings.warn( '''`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use ''' '''`TrainingArguments` instead.''' , _UpperCAmelCase , ) @cached_property def _snake_case (self ): logger.info('''PyTorch: setting up devices''' ) if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( '''torch.distributed process group is initialized, but local_rank == -1. ''' '''In order to use Torch DDP, launch your script with `python -m torch.distributed.launch''' ) if self.no_cuda: __lowerCAmelCase = torch.device('''cpu''' ) __lowerCAmelCase = 0 elif is_sagemaker_model_parallel_available(): __lowerCAmelCase = smp.local_rank() __lowerCAmelCase = torch.device('''cuda''' , _UpperCAmelCase ) __lowerCAmelCase = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend='''smddp''' , timeout=self.ddp_timeout_delta ) __lowerCAmelCase = int(os.getenv('''SMDATAPARALLEL_LOCAL_RANK''' ) ) __lowerCAmelCase = torch.device('''cuda''' , self.local_rank ) __lowerCAmelCase = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 __lowerCAmelCase = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' ) # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. __lowerCAmelCase = torch.cuda.device_count() else: # Here, we'll use torch.distributed. 
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend='''nccl''' , timeout=self.ddp_timeout_delta ) __lowerCAmelCase = torch.device('''cuda''' , self.local_rank ) __lowerCAmelCase = 1 if device.type == "cuda": torch.cuda.set_device(_UpperCAmelCase ) return device @property def _snake_case (self ): if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def _snake_case (self ): return not is_sagemaker_model_parallel_available() @property def _snake_case (self ): return False
366
'''simple docstring'''
from ..utils import DummyObject, requires_backends


class a__(metaclass=DummyObject):
    """simple docstring"""

    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
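Behavior sketch for the dummy placeholder above, assuming transformers' requires_backends, which raises an ImportError when a listed backend is missing (these dummies are only imported in that situation):

try:
    a__()  # `a__` is the placeholder class name as it appears in this dump
except ImportError as err:
    print(err)  # the message points at the missing torch/scipy backends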
9
0
'''simple docstring'''
import math


def is_prime(number):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All prime numbers are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio=0.1):
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
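A quick, hedged check of the restored names above: with a 50% threshold the spiral stays small, so this runs instantly.

print(is_prime(97))   # True
print(solution(0.5))  # 11: first odd side length where the diagonal prime ratio drops below 0.5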
367
'''simple docstring''' import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class a__ ( unittest.TestCase ): """simple docstring""" def __init__(self , __lowercase , __lowercase = True , __lowercase = None , __lowercase = 32 , __lowercase = True , __lowercase = 1 / 2_55 , __lowercase = True , __lowercase = True , __lowercase = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , __lowercase = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , __lowercase = True , __lowercase=7 , __lowercase=30 , __lowercase=4_00 , __lowercase=3 , ): __lowerCAmelCase = parent __lowerCAmelCase = do_resize __lowerCAmelCase = size if size is not None else {'''shortest_edge''': 2_88} __lowerCAmelCase = size_divisor __lowerCAmelCase = do_rescale __lowerCAmelCase = rescale_factor __lowerCAmelCase = do_normalize __lowerCAmelCase = do_center_crop __lowerCAmelCase = image_mean __lowerCAmelCase = image_std __lowerCAmelCase = do_pad __lowerCAmelCase = batch_size __lowerCAmelCase = num_channels __lowerCAmelCase = min_resolution __lowerCAmelCase = max_resolution def _snake_case (self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def _snake_case (self , __lowercase , __lowercase=False ): if not batched: __lowerCAmelCase = self.size['''shortest_edge'''] __lowerCAmelCase = image_inputs[0] if isinstance(__lowercase , Image.Image ): __lowerCAmelCase , __lowerCAmelCase = image.size else: __lowerCAmelCase , __lowerCAmelCase = image.shape[1], image.shape[2] __lowerCAmelCase = size / min(__lowercase , __lowercase ) if h < w: __lowerCAmelCase , __lowerCAmelCase = size, scale * w else: __lowerCAmelCase , __lowerCAmelCase = scale * h, size __lowerCAmelCase = int((13_33 / 8_00) * size ) if max(__lowercase , __lowercase ) > max_size: __lowerCAmelCase = max_size / max(__lowercase , __lowercase ) __lowerCAmelCase = newh * scale __lowerCAmelCase = neww * scale __lowerCAmelCase , __lowerCAmelCase = int(newh + 0.5 ), int(neww + 0.5 ) __lowerCAmelCase , __lowerCAmelCase = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: __lowerCAmelCase = [] for image in image_inputs: __lowerCAmelCase , __lowerCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __lowerCAmelCase = max(__lowercase , key=lambda __lowercase : item[0] )[0] __lowerCAmelCase = max(__lowercase , key=lambda __lowercase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Any = BridgeTowerImageProcessor if is_vision_available() else None def _snake_case (self ): __lowerCAmelCase = BridgeTowerImageProcessingTester(self ) @property def _snake_case (self ): return self.image_processor_tester.prepare_image_processor_dict() def _snake_case (self ): __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowercase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowercase 
, '''image_std''' ) ) self.assertTrue(hasattr(__lowercase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowercase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowercase , '''size''' ) ) self.assertTrue(hasattr(__lowercase , '''size_divisor''' ) ) def _snake_case (self ): pass def _snake_case (self ): # Initialize image processor __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , Image.Image ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case (self ): # Initialize image processor __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , np.ndarray ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case (self ): # Initialize image processor __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , torch.Tensor ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
9
0
'''simple docstring'''
def solution(pence=200):
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
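A tiny usage sketch for the partition count above (illustrative argument):

print(solution(5))  # 4 ways: 5, 2+2+1, 2+1+1+1, 1+1+1+1+1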
368
'''simple docstring''' # Imports import numpy as np class a__ : """simple docstring""" def __init__(self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ): self.set_matricies(red=__lowercase , green=__lowercase , blue=__lowercase , red_edge=__lowercase , nir=__lowercase ) def _snake_case (self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ): if red is not None: __lowerCAmelCase = red if green is not None: __lowerCAmelCase = green if blue is not None: __lowerCAmelCase = blue if red_edge is not None: __lowerCAmelCase = red_edge if nir is not None: __lowerCAmelCase = nir return True def _snake_case (self , __lowercase="" , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ): self.set_matricies(red=__lowercase , green=__lowercase , blue=__lowercase , red_edge=__lowercase , nir=__lowercase ) __lowerCAmelCase = { '''ARVI2''': self.arvaa, '''CCCI''': self.ccci, '''CVI''': self.cvi, '''GLI''': self.gli, '''NDVI''': self.ndvi, '''BNDVI''': self.bndvi, '''redEdgeNDVI''': self.red_edge_ndvi, '''GNDVI''': self.gndvi, '''GBNDVI''': self.gbndvi, '''GRNDVI''': self.grndvi, '''RBNDVI''': self.rbndvi, '''PNDVI''': self.pndvi, '''ATSAVI''': self.atsavi, '''BWDRVI''': self.bwdrvi, '''CIgreen''': self.ci_green, '''CIrededge''': self.ci_rededge, '''CI''': self.ci, '''CTVI''': self.ctvi, '''GDVI''': self.gdvi, '''EVI''': self.evi, '''GEMI''': self.gemi, '''GOSAVI''': self.gosavi, '''GSAVI''': self.gsavi, '''Hue''': self.hue, '''IVI''': self.ivi, '''IPVI''': self.ipvi, '''I''': self.i, '''RVI''': self.rvi, '''MRVI''': self.mrvi, '''MSAVI''': self.m_savi, '''NormG''': self.norm_g, '''NormNIR''': self.norm_nir, '''NormR''': self.norm_r, '''NGRDI''': self.ngrdi, '''RI''': self.ri, '''S''': self.s, '''IF''': self._if, '''DVI''': self.dvi, '''TVI''': self.tvi, '''NDRE''': self.ndre, } try: return funcs[index]() except KeyError: print('''Index not in the list!''' ) return False def _snake_case (self ): return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red))) def _snake_case (self ): return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def _snake_case (self ): return self.nir * (self.red / (self.green**2)) def _snake_case (self ): return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def _snake_case (self ): return (self.nir - self.red) / (self.nir + self.red) def _snake_case (self ): return (self.nir - self.blue) / (self.nir + self.blue) def _snake_case (self ): return (self.redEdge - self.red) / (self.redEdge + self.red) def _snake_case (self ): return (self.nir - self.green) / (self.nir + self.green) def _snake_case (self ): return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def _snake_case (self ): return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def _snake_case (self ): return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def _snake_case (self ): return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def _snake_case (self , __lowercase=0.0_8 , __lowercase=1.2_2 , __lowercase=0.0_3 ): return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def _snake_case (self ): return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def _snake_case (self ): return (self.nir / 
self.green) - 1 def _snake_case (self ): return (self.nir / self.redEdge) - 1 def _snake_case (self ): return (self.red - self.blue) / self.red def _snake_case (self ): __lowerCAmelCase = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def _snake_case (self ): return self.nir - self.green def _snake_case (self ): return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def _snake_case (self ): __lowerCAmelCase = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red) def _snake_case (self , __lowercase=0.1_6 ): return (self.nir - self.green) / (self.nir + self.green + y) def _snake_case (self , __lowercase=0.5 ): return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def _snake_case (self ): return np.arctan( ((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) ) def _snake_case (self , __lowercase=None , __lowercase=None ): return (self.nir - b) / (a * self.red) def _snake_case (self ): return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def _snake_case (self ): return (self.red + self.green + self.blue) / 3_0.5 def _snake_case (self ): return self.nir / self.red def _snake_case (self ): return (self.rvi() - 1) / (self.rvi() + 1) def _snake_case (self ): return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def _snake_case (self ): return self.green / (self.nir + self.red + self.green) def _snake_case (self ): return self.nir / (self.nir + self.red + self.green) def _snake_case (self ): return self.red / (self.nir + self.red + self.green) def _snake_case (self ): return (self.green - self.red) / (self.green + self.red) def _snake_case (self ): return (self.red - self.green) / (self.red + self.green) def _snake_case (self ): __lowerCAmelCase = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) __lowerCAmelCase = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def _snake_case (self ): return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def _snake_case (self ): return self.nir / self.red def _snake_case (self ): return (self.ndvi() + 0.5) ** (1 / 2) def _snake_case (self ): return (self.nir - self.redEdge) / (self.nir + self.redEdge)
9
0
'''simple docstring'''
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path", "bert", "--do_train", "False",
        "--epochs", "3", "--learning_rate", "5e-5", "--max_steps", "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path", "bert", "--do_train", "--do_test", "False",
        "--do_predict", "--epochs", "3", "--learning_rate", "5e-5", "--max_steps", "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # Each "--key value" pair should be converted to its natural Python type.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
369
'''simple docstring''' from math import sqrt def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and ( number >= 0 ), "'number' must been an int and positive" __lowerCAmelCase = True # 0 and 1 are none primes. if number <= 1: __lowerCAmelCase = False for divisor in range(2, int(round(sqrt(lowerCamelCase))) + 1): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: __lowerCAmelCase = False break # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'status' must been from type bool" return status def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N __lowerCAmelCase = list(range(2, n + 1)) __lowerCAmelCase = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(lowerCamelCase)): for j in range(i + 1, len(lowerCamelCase)): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): __lowerCAmelCase = 0 # filters actual prime numbers. __lowerCAmelCase = [x for x in begin_list if x != 0] # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type list" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n > 2), "'N' must been an int and > 2" __lowerCAmelCase = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2, n + 1): if is_prime(lowerCamelCase): ans.append(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type list" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and number >= 0, "'number' must been an int and >= 0" __lowerCAmelCase = [] # this list will be returns of the function. # potential prime number factors. 
__lowerCAmelCase = 2 __lowerCAmelCase = number if number == 0 or number == 1: ans.append(lowerCamelCase) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(lowerCamelCase): while quotient != 1: if is_prime(lowerCamelCase) and (quotient % factor == 0): ans.append(lowerCamelCase) quotient /= factor else: factor += 1 else: ans.append(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type list" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and ( number >= 0 ), "'number' bust been an int and >= 0" __lowerCAmelCase = 0 # prime factorization of 'number' __lowerCAmelCase = prime_factorization(lowerCamelCase) __lowerCAmelCase = max(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type int" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and ( number >= 0 ), "'number' bust been an int and >= 0" __lowerCAmelCase = 0 # prime factorization of 'number' __lowerCAmelCase = prime_factorization(lowerCamelCase) __lowerCAmelCase = min(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type int" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase), "'number' must been an int" assert isinstance(number % 2 == 0, lowerCamelCase), "compare bust been from type bool" return number % 2 == 0 def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase), "'number' must been an int" assert isinstance(number % 2 != 0, lowerCamelCase), "compare bust been from type bool" return number % 2 != 0 def __magic_name__( lowerCamelCase): assert ( isinstance(lowerCamelCase, lowerCamelCase) and (number > 2) and is_even(lowerCamelCase) ), "'number' must been an int, even and > 2" __lowerCAmelCase = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' __lowerCAmelCase = get_prime_numbers(lowerCamelCase) __lowerCAmelCase = len(lowerCamelCase) # run variable for while-loops. __lowerCAmelCase = 0 __lowerCAmelCase = None # exit variable. for break up the loops __lowerCAmelCase = True while i < len_pn and loop: __lowerCAmelCase = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: __lowerCAmelCase = False ans.append(prime_numbers[i]) ans.append(prime_numbers[j]) j += 1 i += 1 # precondition assert ( isinstance(lowerCamelCase, lowerCamelCase) and (len(lowerCamelCase) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0]) and is_prime(ans[1]) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( isinstance(lowerCamelCase, lowerCamelCase) and isinstance(lowerCamelCase, lowerCamelCase) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." __lowerCAmelCase = 0 while numbera != 0: __lowerCAmelCase = numbera % numbera __lowerCAmelCase = numbera __lowerCAmelCase = rest # precondition assert isinstance(lowerCamelCase, lowerCamelCase) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( isinstance(lowerCamelCase, lowerCamelCase) and isinstance(lowerCamelCase, lowerCamelCase) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." 
__lowerCAmelCase = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' __lowerCAmelCase = prime_factorization(lowerCamelCase) __lowerCAmelCase = prime_factorization(lowerCamelCase) elif numbera == 1 or numbera == 1: __lowerCAmelCase = [] __lowerCAmelCase = [] __lowerCAmelCase = max(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = 0 __lowerCAmelCase = 0 __lowerCAmelCase = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: __lowerCAmelCase = prime_fac_a.count(lowerCamelCase) __lowerCAmelCase = prime_fac_a.count(lowerCamelCase) for _ in range(max(lowerCamelCase, lowerCamelCase)): ans *= n else: __lowerCAmelCase = prime_fac_a.count(lowerCamelCase) for _ in range(lowerCamelCase): ans *= n done.append(lowerCamelCase) # iterates through primeFac2 for n in prime_fac_a: if n not in done: __lowerCAmelCase = prime_fac_a.count(lowerCamelCase) for _ in range(lowerCamelCase): ans *= n done.append(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'number' must been a positive int" __lowerCAmelCase = 0 __lowerCAmelCase = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(lowerCamelCase): ans += 1 # precondition assert isinstance(lowerCamelCase, lowerCamelCase) and is_prime( lowerCamelCase), "'ans' must been a prime number and from type int" return ans def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( is_prime(lowerCamelCase) and is_prime(lowerCamelCase) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" __lowerCAmelCase = p_number_a + 1 # jump to the next number __lowerCAmelCase = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(lowerCamelCase): number += 1 while number < p_number_a: ans.append(lowerCamelCase) number += 1 # fetch the next prime number. while not is_prime(lowerCamelCase): number += 1 # precondition assert ( isinstance(lowerCamelCase, lowerCamelCase) and ans[0] != p_number_a and ans[len(lowerCamelCase) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 1), "'n' must been int and >= 1" __lowerCAmelCase = [] # will be returned. 
for divisor in range(1, n + 1): if n % divisor == 0: ans.append(lowerCamelCase) # precondition assert ans[0] == 1 and ans[len(lowerCamelCase) - 1] == n, "Error in function getDivisiors(...)" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and ( number > 1 ), "'number' must been an int and >= 1" __lowerCAmelCase = get_divisors(lowerCamelCase) # precondition assert ( isinstance(lowerCamelCase, lowerCamelCase) and (divisors[0] == 1) and (divisors[len(lowerCamelCase) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1]) == number def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( isinstance(lowerCamelCase, lowerCamelCase) and isinstance(lowerCamelCase, lowerCamelCase) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. __lowerCAmelCase = gcd(abs(lowerCamelCase), abs(lowerCamelCase)) # precondition assert ( isinstance(lowerCamelCase, lowerCamelCase) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'n' must been a int and >= 0" __lowerCAmelCase = 1 # this will be return. for factor in range(1, n + 1): ans *= factor return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'n' must been an int and >= 0" __lowerCAmelCase = 0 __lowerCAmelCase = 1 __lowerCAmelCase = 1 # this will be return for _ in range(n - 1): __lowerCAmelCase = ans ans += fiba __lowerCAmelCase = tmp return ans
9
0
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def __magic_name__( lowerCamelCase): __lowerCAmelCase = filter(lambda lowerCamelCase: p.requires_grad, model.parameters()) __lowerCAmelCase = sum([np.prod(p.size()) for p in model_parameters]) return params _UpperCAmelCase : List[Any] = logging.getLogger(__name__) def __magic_name__( lowerCamelCase, lowerCamelCase): if metric == "rouge2": __lowerCAmelCase = '''{val_avg_rouge2:.4f}-{step_count}''' elif metric == "bleu": __lowerCAmelCase = '''{val_avg_bleu:.4f}-{step_count}''' elif metric == "em": __lowerCAmelCase = '''{val_avg_em:.4f}-{step_count}''' else: raise NotImplementedError( F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this""" ''' function.''') __lowerCAmelCase = ModelCheckpoint( dirpath=lowerCamelCase, filename=lowerCamelCase, monitor=F"""val_{metric}""", mode='''max''', save_top_k=3, every_n_epochs=1, ) return checkpoint_callback def __magic_name__( lowerCamelCase, lowerCamelCase): return EarlyStopping( monitor=F"""val_{metric}""", mode='''min''' if '''loss''' in metric else '''max''', patience=lowerCamelCase, verbose=lowerCamelCase, ) class a__ ( pl.Callback ): """simple docstring""" def _snake_case (self , __lowercase , __lowercase ): __lowerCAmelCase = {F"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(_SCREAMING_SNAKE_CASE ) @rank_zero_only def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase=True ): logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" ) __lowerCAmelCase = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} ) # Log results __lowerCAmelCase = Path(pl_module.hparams.output_dir ) if type_path == "test": __lowerCAmelCase = od / '''test_results.txt''' __lowerCAmelCase = od / '''test_generations.txt''' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
__lowerCAmelCase = od / F"""{type_path}_results/{trainer.global_step:05d}.txt""" __lowerCAmelCase = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt""" results_file.parent.mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) generations_file.parent.mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) with open(_SCREAMING_SNAKE_CASE , '''a+''' ) as writer: for key in sorted(_SCREAMING_SNAKE_CASE ): if key in ["log", "progress_bar", "preds"]: continue __lowerCAmelCase = metrics[key] if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): __lowerCAmelCase = val.item() __lowerCAmelCase = F"""{key}: {val:.6f}\n""" writer.write(_SCREAMING_SNAKE_CASE ) if not save_generations: return if "preds" in metrics: __lowerCAmelCase = '''\n'''.join(metrics['''preds'''] ) generations_file.open('''w+''' ).write(_SCREAMING_SNAKE_CASE ) @rank_zero_only def _snake_case (self , __lowercase , __lowercase ): try: __lowerCAmelCase = pl_module.model.model.num_parameters() except AttributeError: __lowerCAmelCase = pl_module.model.num_parameters() __lowerCAmelCase = count_trainable_parameters(_SCREAMING_SNAKE_CASE ) # mp stands for million parameters trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} ) @rank_zero_only def _snake_case (self , __lowercase , __lowercase ): save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''test''' ) @rank_zero_only def _snake_case (self , __lowercase , __lowercase ): save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
370
'''simple docstring''' import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed _UpperCAmelCase : Dict = """true""" def __magic_name__( lowerCamelCase, lowerCamelCase=8_2, lowerCamelCase=1_6): set_seed(4_2) __lowerCAmelCase = RegressionModel() __lowerCAmelCase = deepcopy(lowerCamelCase) __lowerCAmelCase = RegressionDataset(length=lowerCamelCase) __lowerCAmelCase = DataLoader(lowerCamelCase, batch_size=lowerCamelCase) model.to(accelerator.device) __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowerCamelCase, lowerCamelCase) return model, ddp_model, dataloader def __magic_name__( lowerCamelCase, lowerCamelCase=False): __lowerCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''') __lowerCAmelCase = load_dataset('''glue''', '''mrpc''', split='''validation''') def tokenize_function(lowerCamelCase): __lowerCAmelCase = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=lowerCamelCase, max_length=lowerCamelCase) return outputs with accelerator.main_process_first(): __lowerCAmelCase = dataset.map( lowerCamelCase, batched=lowerCamelCase, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], ) __lowerCAmelCase = tokenized_datasets.rename_column('''label''', '''labels''') def collate_fn(lowerCamelCase): if use_longest: return tokenizer.pad(lowerCamelCase, padding='''longest''', return_tensors='''pt''') return tokenizer.pad(lowerCamelCase, padding='''max_length''', max_length=1_2_8, return_tensors='''pt''') return DataLoader(lowerCamelCase, shuffle=lowerCamelCase, collate_fn=lowerCamelCase, batch_size=1_6) def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase = Accelerator(dispatch_batches=lowerCamelCase, split_batches=lowerCamelCase) __lowerCAmelCase = get_dataloader(lowerCamelCase, not dispatch_batches) __lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained( '''hf-internal-testing/mrpc-bert-base-cased''', return_dict=lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowerCamelCase, lowerCamelCase) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = [] for batch in dataloader: __lowerCAmelCase , __lowerCAmelCase = batch.values() with torch.no_grad(): __lowerCAmelCase = model(lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((logit, target)) logits_and_targets.append((logit, target)) __lowerCAmelCase , __lowerCAmelCase = [], [] for logit, targ in logits_and_targets: logits.append(lowerCamelCase) targs.append(lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = torch.cat(lowerCamelCase), torch.cat(lowerCamelCase) return logits, targs def __magic_name__( lowerCamelCase, lowerCamelCase=8_2, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=1_6): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = get_basic_setup(lowerCamelCase, lowerCamelCase, lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = generate_predictions(lowerCamelCase, lowerCamelCase, lowerCamelCase) assert ( len(lowerCamelCase) == num_samples ), 
F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowerCamelCase)}""" def __magic_name__( lowerCamelCase = False, lowerCamelCase = False): __lowerCAmelCase = evaluate.load('''glue''', '''mrpc''') __lowerCAmelCase , __lowerCAmelCase = get_mrpc_setup(lowerCamelCase, lowerCamelCase) # First do baseline __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup['''no'''] model.to(lowerCamelCase) model.eval() for batch in dataloader: batch.to(lowerCamelCase) with torch.inference_mode(): __lowerCAmelCase = model(**lowerCamelCase) __lowerCAmelCase = outputs.logits.argmax(dim=-1) metric.add_batch(predictions=lowerCamelCase, references=batch['''labels''']) __lowerCAmelCase = metric.compute() # Then do distributed __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup['''ddp'''] model.eval() for batch in dataloader: with torch.inference_mode(): __lowerCAmelCase = model(**lowerCamelCase) __lowerCAmelCase = outputs.logits.argmax(dim=-1) __lowerCAmelCase = batch['''labels'''] __lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((preds, references)) metric.add_batch(predictions=lowerCamelCase, references=lowerCamelCase) __lowerCAmelCase = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key], distributed[key]), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n""" def __magic_name__( ): __lowerCAmelCase = Accelerator(split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('''**Testing gather_for_metrics**''') for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""") test_mrpc(lowerCamelCase, lowerCamelCase) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test torch metrics**''') for split_batches in [True, False]: for dispatch_batches in [True, False]: __lowerCAmelCase = Accelerator(split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase) if accelerator.is_local_main_process: print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""") test_torch_metrics(lowerCamelCase, 9_9) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test last batch is not dropped when perfectly divisible**''') __lowerCAmelCase = Accelerator() test_torch_metrics(lowerCamelCase, 5_1_2) accelerator.state._reset_state() def __magic_name__( lowerCamelCase): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
9
0
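The sample above exercises `Accelerator.gather_for_metrics`; as a reader's aid, here is a minimal sketch of the evaluation pattern it tests. The `model`, `dataloader`, and `metric` names are placeholders (an HF-style model with `.logits` and an `evaluate`-style metric are assumed), not objects from the sample.

import torch
from accelerate import Accelerator


def evaluate_distributed(model, dataloader, metric):
    accelerator = Accelerator()
    model, dataloader = accelerator.prepare(model, dataloader)
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            logits = model(**batch).logits  # assumes an HF-style model output
        preds = logits.argmax(dim=-1)
        # gather across processes; drops the duplicate samples Accelerate
        # pads onto the last batch, which is the point of the test above
        preds, refs = accelerator.gather_for_metrics((preds, batch["labels"]))
        metric.add_batch(predictions=preds, references=refs)
    return metric.compute()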
'''simple docstring''' import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class a__ : """simple docstring""" def __init__(self , __lowercase , __lowercase=13 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=99 , __lowercase=32 , __lowercase=5 , __lowercase=4 , __lowercase=37 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=50 , __lowercase=0.0_2 , __lowercase=True , __lowercase=None , ): __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = seq_length __lowerCAmelCase = is_training __lowerCAmelCase = use_input_mask __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = initializer_range __lowerCAmelCase = use_labels __lowerCAmelCase = scope def _snake_case (self ): __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase = None if self.use_input_mask: __lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase = self.get_config() return config, input_ids, input_mask, token_labels def _snake_case (self ): return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__lowercase , initializer_range=self.initializer_range , ) def _snake_case (self ): ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = self.prepare_config_and_inputs() __lowerCAmelCase = True __lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , **__lowercase , ): __lowerCAmelCase = BertGenerationEncoder(config=__lowercase ) model.to(__lowercase ) model.eval() __lowerCAmelCase = model(__lowercase , attention_mask=__lowercase ) __lowerCAmelCase = model(__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , **__lowercase , ): __lowerCAmelCase = True __lowerCAmelCase = BertGenerationEncoder(config=__lowercase ) model.to(__lowercase ) model.eval() 
__lowerCAmelCase = model( __lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , ) __lowerCAmelCase = model( __lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , **__lowercase , ): __lowerCAmelCase = True __lowerCAmelCase = True __lowerCAmelCase = BertGenerationDecoder(config=__lowercase ).to(__lowercase ).eval() # first forward pass __lowerCAmelCase = model( __lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , use_cache=__lowercase , ) __lowerCAmelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) __lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 ) __lowerCAmelCase = model( __lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , output_hidden_states=__lowercase , )['''hidden_states'''][0] __lowerCAmelCase = model( __lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , past_key_values=__lowercase , output_hidden_states=__lowercase , )['''hidden_states'''][0] # select random slice __lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach() __lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1e-3 ) ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , *__lowercase , ): __lowerCAmelCase = BertGenerationDecoder(__lowercase ) model.to(__lowercase ) model.eval() __lowerCAmelCase = model(__lowercase , attention_mask=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case (self ): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.prepare_config_and_inputs() __lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class a__ ( a_ , a_ , a_ , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Union[str, Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () __UpperCamelCase : Optional[int] = (BertGenerationDecoder,) if is_torch_available() else () __UpperCamelCase : List[str] = ( {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder} if is_torch_available() else {} ) def _snake_case (self ): __lowerCAmelCase = BertGenerationEncoderTester(self ) __lowerCAmelCase = ConfigTester(self , config_class=__lowercase , hidden_size=37 ) def _snake_case (self ): self.config_tester.run_common_tests() def _snake_case (self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def 
_snake_case (self ): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() __lowerCAmelCase = '''bert''' self.model_tester.create_and_check_model(__lowercase , __lowercase , __lowercase , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__lowercase ) def _snake_case (self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__lowercase ) def _snake_case (self ): ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() __lowerCAmelCase = None self.model_tester.create_and_check_model_as_decoder( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) def _snake_case (self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__lowercase ) @slow def _snake_case (self ): __lowerCAmelCase = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) self.assertIsNotNone(__lowercase ) @require_torch class a__ ( unittest.TestCase ): """simple docstring""" @slow def _snake_case (self ): __lowerCAmelCase = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) __lowerCAmelCase = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] ) with torch.no_grad(): __lowerCAmelCase = model(__lowercase )[0] __lowerCAmelCase = torch.Size([1, 8, 10_24] ) self.assertEqual(output.shape , __lowercase ) __lowerCAmelCase = torch.tensor( [[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __lowercase , atol=1e-4 ) ) @require_torch class a__ ( unittest.TestCase ): """simple docstring""" @slow def _snake_case (self ): __lowerCAmelCase = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) __lowerCAmelCase = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] ) with torch.no_grad(): __lowerCAmelCase = model(__lowercase )[0] __lowerCAmelCase = torch.Size([1, 8, 5_03_58] ) self.assertEqual(output.shape , __lowercase ) __lowerCAmelCase = torch.tensor( [[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __lowercase , atol=1e-4 ) )
371
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCAmelCase : str = logging.get_logger(__name__) _UpperCAmelCase : str = { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""", """roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""", } class a__ ( __A ): """simple docstring""" __UpperCamelCase : str = 'roberta' def __init__(self , __lowercase=5_02_65 , __lowercase=7_68 , __lowercase=12 , __lowercase=12 , __lowercase=30_72 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_12 , __lowercase=2 , __lowercase=0.0_2 , __lowercase=1e-12 , __lowercase=1 , __lowercase=0 , __lowercase=2 , __lowercase="absolute" , __lowercase=True , __lowercase=None , **__lowercase , ): super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = hidden_act __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = type_vocab_size __lowerCAmelCase = initializer_range __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = position_embedding_type __lowerCAmelCase = use_cache __lowerCAmelCase = classifier_dropout class a__ ( __A ): """simple docstring""" @property def _snake_case (self ): if self.task == "multiple-choice": __lowerCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __lowerCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
9
0
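The decoder test in the row above checks that cached generation matches a full forward pass. A condensed sketch of that check, assuming an HF-style decoder that accepts `use_cache` and `past_key_values` and returns `hidden_states` when asked:

import torch


def check_cache_consistency(model, input_ids, next_tokens, atol=1e-3):
    # full sequence in one pass vs. incremental pass with past_key_values
    full_ids = torch.cat([input_ids, next_tokens], dim=-1)
    no_past = model(full_ids, output_hidden_states=True).hidden_states[0]
    cached = model(input_ids, use_cache=True)
    with_past = model(
        next_tokens,
        past_key_values=cached.past_key_values,
        output_hidden_states=True,
    ).hidden_states[0]
    n_new = next_tokens.shape[1]
    # the newly generated positions must agree between the two runs
    return torch.allclose(no_past[:, -n_new:], with_past, atol=atol)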
'''simple docstring''' import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase : Any = logging.get_logger() def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = True): print(F"""Converting {name}...""") with torch.no_grad(): if hidden_sizes == 1_2_8: if name[-1] == "S": __lowerCAmelCase = timm.create_model('''levit_128s''', pretrained=_SCREAMING_SNAKE_CASE) else: __lowerCAmelCase = timm.create_model('''levit_128''', pretrained=_SCREAMING_SNAKE_CASE) if hidden_sizes == 1_9_2: __lowerCAmelCase = timm.create_model('''levit_192''', pretrained=_SCREAMING_SNAKE_CASE) if hidden_sizes == 2_5_6: __lowerCAmelCase = timm.create_model('''levit_256''', pretrained=_SCREAMING_SNAKE_CASE) if hidden_sizes == 3_8_4: __lowerCAmelCase = timm.create_model('''levit_384''', pretrained=_SCREAMING_SNAKE_CASE) from_model.eval() __lowerCAmelCase = LevitForImageClassificationWithTeacher(_SCREAMING_SNAKE_CASE).eval() __lowerCAmelCase = OrderedDict() __lowerCAmelCase = from_model.state_dict() __lowerCAmelCase = list(from_model.state_dict().keys()) __lowerCAmelCase = list(our_model.state_dict().keys()) print(len(_SCREAMING_SNAKE_CASE), len(_SCREAMING_SNAKE_CASE)) for i in range(len(_SCREAMING_SNAKE_CASE)): __lowerCAmelCase = weights[og_keys[i]] our_model.load_state_dict(_SCREAMING_SNAKE_CASE) __lowerCAmelCase = torch.randn((2, 3, 2_2_4, 2_2_4)) __lowerCAmelCase = from_model(_SCREAMING_SNAKE_CASE) __lowerCAmelCase = our_model(_SCREAMING_SNAKE_CASE).logits assert torch.allclose(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE), "The model logits don't match the original one." 
__lowerCAmelCase = name print(_SCREAMING_SNAKE_CASE) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name) __lowerCAmelCase = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name) print(F"""Pushed {checkpoint_name}""") def __magic_name__( lowerCamelCase, lowerCamelCase = None, lowerCamelCase = True): __lowerCAmelCase = "imagenet-1k-id2label.json" __lowerCAmelCase = 1_0_0_0 __lowerCAmelCase = (1, num_labels) __lowerCAmelCase = "huggingface/label-files" __lowerCAmelCase = num_labels __lowerCAmelCase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, repo_type='''dataset'''), '''r''')) __lowerCAmelCase = {int(_SCREAMING_SNAKE_CASE): v for k, v in idalabel.items()} __lowerCAmelCase = idalabel __lowerCAmelCase = {v: k for k, v in idalabel.items()} __lowerCAmelCase = partial(_SCREAMING_SNAKE_CASE, num_labels=_SCREAMING_SNAKE_CASE, idalabel=_SCREAMING_SNAKE_CASE, labelaid=_SCREAMING_SNAKE_CASE) __lowerCAmelCase = { "levit-128S": 1_2_8, "levit-128": 1_2_8, "levit-192": 1_9_2, "levit-256": 2_5_6, "levit-384": 3_8_4, } __lowerCAmelCase = { "levit-128S": ImageNetPreTrainedConfig( hidden_sizes=[1_2_8, 2_5_6, 3_8_4], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[1_6, 1_6, 1_6], drop_path_rate=0, ), "levit-128": ImageNetPreTrainedConfig( hidden_sizes=[1_2_8, 2_5_6, 3_8_4], num_attention_heads=[4, 8, 1_2], depths=[4, 4, 4], key_dim=[1_6, 1_6, 1_6], drop_path_rate=0, ), "levit-192": ImageNetPreTrainedConfig( hidden_sizes=[1_9_2, 2_8_8, 3_8_4], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[3_2, 3_2, 3_2], drop_path_rate=0, ), "levit-256": ImageNetPreTrainedConfig( hidden_sizes=[2_5_6, 3_8_4, 5_1_2], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[3_2, 3_2, 3_2], drop_path_rate=0, ), "levit-384": ImageNetPreTrainedConfig( hidden_sizes=[3_8_4, 5_1_2, 7_6_8], num_attention_heads=[6, 9, 1_2], depths=[4, 4, 4], key_dim=[3_2, 3_2, 3_2], drop_path_rate=0.1, ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name], _SCREAMING_SNAKE_CASE, names_to_config[model_name], _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name], _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE) return config, expected_shape if __name__ == "__main__": _UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""levit-dump-folder/""", type=Path, required=False, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) _UpperCAmelCase : Dict = parser.parse_args() _UpperCAmelCase : Tuple = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
350
'''simple docstring''' import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase = old_name if "patch_embed" in old_name: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = old_name.split('''.''') if layer == "0": __lowerCAmelCase = old_name.replace('''0''', '''convolution1''') elif layer == "1": __lowerCAmelCase = old_name.replace('''1''', '''batchnorm_before''') elif layer == "3": __lowerCAmelCase = old_name.replace('''3''', '''convolution2''') else: __lowerCAmelCase = old_name.replace('''4''', '''batchnorm_after''') if "network" in old_name and re.search(r'''\d\.\d''', lowerCamelCase): __lowerCAmelCase = r'''\b\d{2}\b''' if bool(re.search(lowerCamelCase, lowerCamelCase)): __lowerCAmelCase = re.search(r'''\d\.\d\d.''', lowerCamelCase).group() else: __lowerCAmelCase = re.search(r'''\d\.\d.''', lowerCamelCase).group() if int(match[0]) < 6: __lowerCAmelCase = old_name.replace(lowerCamelCase, '''''') __lowerCAmelCase = trimmed_name.replace('''network''', match[0] + '''.meta4D_layers.blocks.''' + match[2:-1]) __lowerCAmelCase = '''intermediate_stages.''' + trimmed_name else: __lowerCAmelCase = old_name.replace(lowerCamelCase, '''''') if int(match[2]) < num_meta4D_last_stage: __lowerCAmelCase = trimmed_name.replace('''network''', '''meta4D_layers.blocks.''' + match[2]) else: __lowerCAmelCase = str(int(match[2]) - num_meta4D_last_stage) __lowerCAmelCase = trimmed_name.replace('''network''', '''meta3D_layers.blocks.''' + layer_index) if "norm1" in old_name: __lowerCAmelCase = trimmed_name.replace('''norm1''', '''layernorm1''') elif "norm2" in old_name: __lowerCAmelCase = trimmed_name.replace('''norm2''', '''layernorm2''') elif "fc1" in old_name: __lowerCAmelCase = trimmed_name.replace('''fc1''', '''linear_in''') elif "fc2" in old_name: __lowerCAmelCase = trimmed_name.replace('''fc2''', '''linear_out''') __lowerCAmelCase = '''last_stage.''' + trimmed_name elif "network" in old_name and re.search(r'''.\d.''', lowerCamelCase): __lowerCAmelCase = old_name.replace('''network''', '''intermediate_stages''') if "fc" in new_name: __lowerCAmelCase = new_name.replace('''fc''', '''convolution''') elif ("norm1" in new_name) and ("layernorm1" not in new_name): __lowerCAmelCase = new_name.replace('''norm1''', '''batchnorm_before''') elif ("norm2" in new_name) and ("layernorm2" not in new_name): __lowerCAmelCase = new_name.replace('''norm2''', '''batchnorm_after''') if "proj" in new_name: __lowerCAmelCase = new_name.replace('''proj''', '''projection''') if "dist_head" in new_name: __lowerCAmelCase = new_name.replace('''dist_head''', '''distillation_classifier''') elif "head" in new_name: __lowerCAmelCase = new_name.replace('''head''', '''classifier''') elif "patch_embed" in new_name: __lowerCAmelCase = '''efficientformer.''' + new_name elif new_name == "norm.weight" or new_name == "norm.bias": __lowerCAmelCase = new_name.replace('''norm''', '''layernorm''') __lowerCAmelCase = '''efficientformer.''' + new_name else: __lowerCAmelCase = '''efficientformer.encoder.''' + new_name return new_name def __magic_name__( lowerCamelCase, lowerCamelCase): for key in checkpoint.copy().keys(): 
__lowerCAmelCase = checkpoint.pop(lowerCamelCase) __lowerCAmelCase = val return checkpoint def __magic_name__( ): __lowerCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __lowerCAmelCase = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase).raw) return image def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = torch.load(lowerCamelCase, map_location='''cpu''')['''model'''] __lowerCAmelCase = EfficientFormerConfig.from_json_file(lowerCamelCase) __lowerCAmelCase = EfficientFormerForImageClassificationWithTeacher(lowerCamelCase) __lowerCAmelCase = '''_'''.join(checkpoint_path.split('''/''')[-1].split('''.''')[0].split('''_''')[:-1]) __lowerCAmelCase = config.depths[-1] - config.num_metaad_blocks + 1 __lowerCAmelCase = convert_torch_checkpoint(lowerCamelCase, lowerCamelCase) model.load_state_dict(lowerCamelCase) model.eval() __lowerCAmelCase = { '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } # prepare image __lowerCAmelCase = prepare_img() __lowerCAmelCase = 2_5_6 __lowerCAmelCase = 2_2_4 __lowerCAmelCase = EfficientFormerImageProcessor( size={'''shortest_edge''': image_size}, crop_size={'''height''': crop_size, '''width''': crop_size}, resample=pillow_resamplings['''bicubic'''], ) __lowerCAmelCase = processor(images=lowerCamelCase, return_tensors='''pt''').pixel_values # original processing pipeline __lowerCAmelCase = Compose( [ Resize(lowerCamelCase, interpolation=pillow_resamplings['''bicubic''']), CenterCrop(lowerCamelCase), ToTensor(), Normalize(lowerCamelCase, lowerCamelCase), ]) __lowerCAmelCase = image_transforms(lowerCamelCase).unsqueeze(0) assert torch.allclose(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = model(lowerCamelCase) __lowerCAmelCase = outputs.logits __lowerCAmelCase = (1, 1_0_0_0) if "l1" in model_name: __lowerCAmelCase = torch.Tensor( [-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28]) assert torch.allclose(logits[0, :1_0], lowerCamelCase, atol=1E-3) assert logits.shape == expected_shape elif "l3" in model_name: __lowerCAmelCase = torch.Tensor( [-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27]) assert torch.allclose(logits[0, :1_0], lowerCamelCase, atol=1E-3) assert logits.shape == expected_shape elif "l7" in model_name: __lowerCAmelCase = torch.Tensor( [-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78]) assert logits.shape == expected_shape else: raise ValueError( F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""") # Save Checkpoints Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase) model.save_pretrained(lowerCamelCase) print(F"""Checkpoint successfuly converted. 
Model saved at {pytorch_dump_path}""") processor.save_pretrained(lowerCamelCase) print(F"""Processor successfuly saved at {pytorch_dump_path}""") if push_to_hub: print('''Pushing model to the hub...''') model.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add model''', use_temp_dir=lowerCamelCase, ) processor.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add image processor''', use_temp_dir=lowerCamelCase, ) if __name__ == "__main__": _UpperCAmelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--pytorch_model_path""", default=None, type=str, required=True, help="""Path to EfficientFormer pytorch checkpoint.""", ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The json file for EfficientFormer model config.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) parser.set_defaults(push_to_hub=True) _UpperCAmelCase : List[str] = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
9
0
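Both conversion scripts in this row follow the same recipe: rename every key of the original state dict, load the result into the new architecture, and compare outputs numerically. A stripped-down sketch of that recipe, where `rename_key` stands in for the model-specific renaming logic and both models are assumed to return plain tensors:

from collections import OrderedDict

import torch


def convert_state_dict(old_state_dict, rename_key):
    # map every original key to its new name, keeping tensors untouched
    new_state_dict = OrderedDict()
    for old_key, tensor in old_state_dict.items():
        new_state_dict[rename_key(old_key)] = tensor
    return new_state_dict


def check_conversion(old_model, new_model, rename_key, example_input):
    new_model.load_state_dict(convert_state_dict(old_model.state_dict(), rename_key))
    old_model.eval()
    new_model.eval()
    with torch.no_grad():
        return torch.allclose(old_model(example_input), new_model(example_input))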
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. __lowerCAmelCase = [[1, 2, 4], [1, 2, 3, 4]] __lowerCAmelCase = DisjunctiveConstraint(__lowercase ) self.assertTrue(isinstance(dc.token_ids , __lowercase ) ) with self.assertRaises(__lowercase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__lowercase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def _snake_case (self ): # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). __lowerCAmelCase = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__lowercase ): DisjunctiveConstraint(__lowercase ) # fails here def _snake_case (self ): __lowerCAmelCase = [[1, 2, 3], [1, 2, 4]] __lowerCAmelCase = DisjunctiveConstraint(__lowercase ) __lowerCAmelCase = dc.update(1 ) __lowerCAmelCase = stepped is True and completed is False and reset is False self.assertTrue(__lowercase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase = dc.update(2 ) __lowerCAmelCase = stepped is True and completed is False and reset is False self.assertTrue(__lowercase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase = dc.update(3 ) __lowerCAmelCase = stepped is True and completed is True and reset is False self.assertTrue(__lowercase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def _snake_case (self ): __lowerCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __lowerCAmelCase = DisjunctiveConstraint(__lowercase ) __lowerCAmelCase = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __lowerCAmelCase = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __lowerCAmelCase = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
351
'''simple docstring''' from __future__ import annotations import math def __magic_name__( lowerCamelCase, lowerCamelCase): if len(lowerCamelCase) != 2 or len(a[0]) != 2 or len(lowerCamelCase) != 2 or len(b[0]) != 2: raise Exception('''Matrices are not 2x2''') __lowerCAmelCase = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def __magic_name__( lowerCamelCase, lowerCamelCase): return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))] for row in range(len(lowerCamelCase)) ] def __magic_name__( lowerCamelCase, lowerCamelCase): return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))] for row in range(len(lowerCamelCase)) ] def __magic_name__( lowerCamelCase): if len(lowerCamelCase) % 2 != 0 or len(a[0]) % 2 != 0: raise Exception('''Odd matrices are not supported!''') __lowerCAmelCase = len(lowerCamelCase) __lowerCAmelCase = matrix_length // 2 __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase, lowerCamelCase)] for i in range(lowerCamelCase)] __lowerCAmelCase = [ [a[i][j] for j in range(lowerCamelCase, lowerCamelCase)] for i in range(lowerCamelCase, lowerCamelCase) ] __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase)] for i in range(lowerCamelCase)] __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase)] for i in range(lowerCamelCase, lowerCamelCase)] return top_left, top_right, bot_left, bot_right def __magic_name__( lowerCamelCase): return len(lowerCamelCase), len(matrix[0]) def __magic_name__( lowerCamelCase): print('''\n'''.join(str(lowerCamelCase) for line in matrix)) def __magic_name__( lowerCamelCase, lowerCamelCase): if matrix_dimensions(lowerCamelCase) == (2, 2): return default_matrix_multiplication(lowerCamelCase, lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase) __lowerCAmelCase = actual_strassen(lowerCamelCase, matrix_subtraction(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase) __lowerCAmelCase = actual_strassen(lowerCamelCase, matrix_subtraction(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase), lowerCamelCase) __lowerCAmelCase = matrix_addition(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = matrix_addition(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase), lowerCamelCase) # construct the new matrix from our 4 quadrants __lowerCAmelCase = [] for i in range(len(lowerCamelCase)): new_matrix.append(top_left[i] + top_right[i]) for i in range(len(lowerCamelCase)): new_matrix.append(bot_left[i] + bot_right[i]) return 
new_matrix def __magic_name__( lowerCamelCase, lowerCamelCase): if matrix_dimensions(lowerCamelCase)[1] != matrix_dimensions(lowerCamelCase)[0]: __lowerCAmelCase = ( '''Unable to multiply these matrices, please check the dimensions.\n''' F"""Matrix A: {matrixa}\n""" F"""Matrix B: {matrixa}""" ) raise Exception(lowerCamelCase) __lowerCAmelCase = matrix_dimensions(lowerCamelCase) __lowerCAmelCase = matrix_dimensions(lowerCamelCase) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] __lowerCAmelCase = max(*lowerCamelCase, *lowerCamelCase) __lowerCAmelCase = int(math.pow(2, math.ceil(math.loga(lowerCamelCase)))) __lowerCAmelCase = matrixa __lowerCAmelCase = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0, lowerCamelCase): if i < dimensiona[0]: for _ in range(dimensiona[1], lowerCamelCase): new_matrixa[i].append(0) else: new_matrixa.append([0] * maxim) if i < dimensiona[0]: for _ in range(dimensiona[1], lowerCamelCase): new_matrixa[i].append(0) else: new_matrixa.append([0] * maxim) __lowerCAmelCase = actual_strassen(lowerCamelCase, lowerCamelCase) # Removing the additional zeros for i in range(0, lowerCamelCase): if i < dimensiona[0]: for _ in range(dimensiona[1], lowerCamelCase): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": _UpperCAmelCase : List[str] = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] _UpperCAmelCase : Optional[Any] = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]] print(strassen(matrixa, matrixa))
9
0
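The Strassen routine above bottoms out in a 2x2 base case that uses ordinary eight-product multiplication (`default_matrix_multiplication`). For reference, Strassen's seven-product version of that same base case can be written out explicitly; this is a sketch using the standard products, not code from the sample:

def strassen_2x2(a, b):
    # Strassen's seven products for 2x2 matrices (one fewer multiplication
    # than the naive eight used by default_matrix_multiplication above)
    p1 = a[0][0] * (b[0][1] - b[1][1])
    p2 = (a[0][0] + a[0][1]) * b[1][1]
    p3 = (a[1][0] + a[1][1]) * b[0][0]
    p4 = a[1][1] * (b[1][0] - b[0][0])
    p5 = (a[0][0] + a[1][1]) * (b[0][0] + b[1][1])
    p6 = (a[0][1] - a[1][1]) * (b[1][0] + b[1][1])
    p7 = (a[0][0] - a[1][0]) * (b[0][0] + b[0][1])
    return [
        [p5 + p4 - p2 + p6, p1 + p2],
        [p3 + p4, p1 + p5 - p3 - p7],
    ]


assert strassen_2x2([[1, 2], [3, 4]], [[5, 6], [7, 8]]) == [[19, 22], [43, 50]]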
import string

# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    'E': 12.70, 'T': 9.06, 'A': 8.17, 'O': 7.51, 'I': 6.97, 'N': 6.75,
    'S': 6.33, 'H': 6.09, 'R': 5.99, 'D': 4.25, 'L': 4.03, 'C': 2.78,
    'U': 2.76, 'M': 2.41, 'W': 2.36, 'F': 2.23, 'G': 2.02, 'Y': 1.97,
    'P': 1.93, 'B': 1.29, 'V': 0.98, 'K': 0.77, 'J': 0.15, 'X': 0.15,
    'Q': 0.10, 'Z': 0.07,
}
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'


def get_letter_count(message: str) -> dict:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter = {freq: [] for freq in letter_to_freq.values()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str = {}
    for freq in freq_to_letter:
        # break frequency ties using the reversed standard English order
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = ''.join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return ''.join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score


if __name__ == "__main__":
    import doctest

    doctest.testmod()
352
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): __lowerCAmelCase = tempfile.mkdtemp() # fmt: off __lowerCAmelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) __lowerCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowerCAmelCase = {'''unk_token''': '''<unk>'''} __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowercase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowercase ) ) __lowerCAmelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } __lowerCAmelCase = os.path.join(self.tmpdirname , __lowercase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowercase , __lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , **__lowercase ): return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self ): shutil.rmtree(self.tmpdirname ) def _snake_case (self ): __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCAmelCase = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase ) __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowercase ) 
self.assertIsInstance(processor_fast.tokenizer , __lowercase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowercase ) self.assertIsInstance(processor_fast.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCAmelCase = self.get_image_processor(do_normalize=__lowercase , padding_value=1.0 ) __lowerCAmelCase = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowercase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = image_processor(__lowercase , return_tensors='''np''' ) __lowerCAmelCase = processor(images=__lowercase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = processor(text=__lowercase ) __lowerCAmelCase = tokenizer(__lowercase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(text=__lowercase , images=__lowercase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCAmelCase = processor.batch_decode(__lowercase ) __lowerCAmelCase = tokenizer.batch_decode(__lowercase ) self.assertListEqual(__lowercase , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(text=__lowercase , images=__lowercase ) self.assertListEqual(list(inputs.keys() ) 
, processor.model_input_names )
9
0
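A quick usage sketch for the frequency-analysis helpers above; the sample sentence is arbitrary, and typical English text scores close to the maximum of 12:

sample = "Alan Mathison Turing was an English mathematician and computer scientist"
order = get_frequency_order(sample)       # letters of the sample, most to least frequent
score = english_freq_match_score(sample)  # 0..12, higher means more English-like
print(order, score)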
"""Count the n-digit positive integers that are also nth powers."""


def solution(max_base: int = 10, max_power: int = 22) -> int:
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
353
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class a__ ( __A ): """simple docstring""" def __init__(self , __lowercase , __lowercase=None , __lowercase=None , __lowercase=0 ): __lowerCAmelCase = 1.0 if scale is None else scale __lowerCAmelCase = 0.0 if loc is None else loc super().__init__(__lowercase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__lowercase )] ) @property def _snake_case (self ): return self.base_dist.mean * self.scale + self.loc @property def _snake_case (self ): return self.base_dist.variance * self.scale**2 @property def _snake_case (self ): return self.variance.sqrt() class a__ ( nn.Module ): """simple docstring""" def __init__(self , __lowercase , __lowercase , __lowercase , **__lowercase ): super().__init__(**__lowercase ) __lowerCAmelCase = args_dim __lowerCAmelCase = nn.ModuleList([nn.Linear(__lowercase , __lowercase ) for dim in args_dim.values()] ) __lowerCAmelCase = domain_map def _snake_case (self , __lowercase ): __lowerCAmelCase = [proj(__lowercase ) for proj in self.proj] return self.domain_map(*__lowercase ) class a__ ( nn.Module ): """simple docstring""" def __init__(self , __lowercase ): super().__init__() __lowerCAmelCase = function def _snake_case (self , __lowercase , *__lowercase ): return self.function(__lowercase , *__lowercase ) class a__ : """simple docstring""" __UpperCamelCase : type __UpperCamelCase : int __UpperCamelCase : Dict[str, int] def __init__(self , __lowercase = 1 ): __lowerCAmelCase = dim __lowerCAmelCase = {k: dim * self.args_dim[k] for k in self.args_dim} def _snake_case (self , __lowercase ): if self.dim == 1: return self.distribution_class(*__lowercase ) else: return Independent(self.distribution_class(*__lowercase ) , 1 ) def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None , ): __lowerCAmelCase = self._base_distribution(__lowercase ) if loc is None and scale is None: return distr else: return AffineTransformed(__lowercase , loc=__lowercase , scale=__lowercase , event_dim=self.event_dim ) @property def _snake_case (self ): return () if self.dim == 1 else (self.dim,) @property def _snake_case (self ): return len(self.event_shape ) @property def _snake_case (self ): return 0.0 def _snake_case (self , __lowercase ): return ParameterProjection( in_features=__lowercase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def _snake_case (self , *__lowercase ): raise NotImplementedError() @staticmethod def _snake_case (__lowercase ): return (x + torch.sqrt(torch.square(__lowercase ) + 4.0 )) / 2.0 class a__ ( __A ): """simple docstring""" __UpperCamelCase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} __UpperCamelCase : type = StudentT @classmethod def _snake_case (cls , __lowercase , __lowercase , __lowercase ): __lowerCAmelCase = cls.squareplus(__lowercase ).clamp_min(torch.finfo(scale.dtype ).eps ) __lowerCAmelCase = 2.0 + cls.squareplus(__lowercase ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class a__ ( __A ): """simple docstring""" __UpperCamelCase : Dict[str, int] = {"loc": 1, "scale": 1} __UpperCamelCase : type = Normal @classmethod def _snake_case (cls , __lowercase , __lowercase ): __lowerCAmelCase = cls.squareplus(__lowercase ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class a__ ( __A ): """simple 
docstring""" __UpperCamelCase : Dict[str, int] = {"total_count": 1, "logits": 1} __UpperCamelCase : type = NegativeBinomial @classmethod def _snake_case (cls , __lowercase , __lowercase ): __lowerCAmelCase = cls.squareplus(__lowercase ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def _snake_case (self , __lowercase ): __lowerCAmelCase , __lowerCAmelCase = distr_args if self.dim == 1: return self.distribution_class(total_count=__lowercase , logits=__lowercase ) else: return Independent(self.distribution_class(total_count=__lowercase , logits=__lowercase ) , 1 ) def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None ): __lowerCAmelCase , __lowerCAmelCase = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
9
0
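The power/digit-count solution above hinges on the condition `len(str(base**power)) == power`; a few hand-checkable cases make it concrete:

# 7**2 = 49 (2 digits), 8**3 = 512 (3 digits), 9**4 = 6561 (4 digits)
for base, power in [(7, 2), (8, 3), (9, 4)]:
    value = base**power
    assert len(str(value)) == power, (base, power, value)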
# Logistic Regression from scratch

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70_000):
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta:", theta)  # printing the theta i.e. our weights vector

    def predict_prob(x):
        # predicting the probability from the fitted logistic regression model
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
354
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class a__ ( __A ): """simple docstring""" __UpperCamelCase : Tuple = 'naver-clova-ix/donut-base-finetuned-docvqa' __UpperCamelCase : List[str] = ( 'This is a tool that answers a question about an document (pdf). It takes an input named `document` which ' 'should be the document containing the information, as well as a `question` that is the question about the ' 'document. It returns a text that contains the answer to the question.' ) __UpperCamelCase : Optional[int] = 'document_qa' __UpperCamelCase : Optional[int] = AutoProcessor __UpperCamelCase : Tuple = VisionEncoderDecoderModel __UpperCamelCase : Any = ['image', 'text'] __UpperCamelCase : Optional[Any] = ['text'] def __init__(self , *__lowercase , **__lowercase ): if not is_vision_available(): raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' ) super().__init__(*__lowercase , **__lowercase ) def _snake_case (self , __lowercase , __lowercase ): __lowerCAmelCase = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>''' __lowerCAmelCase = task_prompt.replace('''{user_input}''' , __lowercase ) __lowerCAmelCase = self.pre_processor.tokenizer( __lowercase , add_special_tokens=__lowercase , return_tensors='''pt''' ).input_ids __lowerCAmelCase = self.pre_processor(__lowercase , return_tensors='''pt''' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def _snake_case (self , __lowercase ): return self.model.generate( inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__lowercase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__lowercase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__lowercase , ).sequences def _snake_case (self , __lowercase ): __lowerCAmelCase = self.pre_processor.batch_decode(__lowercase )[0] __lowerCAmelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' ) __lowerCAmelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' ) __lowerCAmelCase = re.sub(R'''<.*?>''' , '''''' , __lowercase , count=1 ).strip() # remove first task start token __lowerCAmelCase = self.pre_processor.tokenajson(__lowercase ) return sequence["answer"]
9
0
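The logistic-regression sample above uses the analytic gradient `x.T @ (h - y) / m`; a finite-difference check on random data (a sketch, independent of the sample's variables) confirms that formula:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(20, 2))
y = (rng.random(20) > 0.5).astype(float)
theta = rng.normal(size=2)


def loss(t):
    h = 1 / (1 + np.exp(-x @ t))
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


analytic = x.T @ (1 / (1 + np.exp(-x @ theta)) - y) / y.size
eps = 1e-6
numeric = np.array(
    [(loss(theta + eps * e) - loss(theta - eps * e)) / (2 * eps) for e in np.eye(2)]
)
print(np.allclose(analytic, numeric, atol=1e-5))  # expect True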
"""Generate all permutations of a sequence with Heap's algorithm."""


def heaps(arr: list) -> list:
    """Return all permutations of arr, each as a tuple."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list) -> None:
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
355
"""Find the first triangular number with more than 500 divisors."""


def count_divisors(n: int) -> int:
    # count divisors via the prime factorization: product of (multiplicity + 1)
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:  # a remaining prime factor contributes a factor of 2
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
9
0
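A brute-force cross-check for the `count_divisors` helper above on small inputs (a sketch reusing that function):

def brute_force_divisors(n: int) -> int:
    return sum(1 for d in range(1, n + 1) if n % d == 0)


for n in [1, 2, 12, 28, 360]:
    assert count_divisors(n) == brute_force_divisors(n), n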
'''simple docstring''' import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING _UpperCAmelCase : List[Any] = { '''facebook/mask2former-swin-small-coco-instance''': ( '''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json''' ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } _UpperCAmelCase : Tuple = logging.get_logger(__name__) class a__ ( A__ ): """simple docstring""" __UpperCamelCase : Optional[int] = 'mask2former' __UpperCamelCase : List[str] = ['swin'] __UpperCamelCase : Optional[int] = {'hidden_size': 'hidden_dim'} def __init__(self , __lowercase = None , __lowercase = 2_56 , __lowercase = 2_56 , __lowercase = 2_56 , __lowercase = 10_24 , __lowercase = "relu" , __lowercase = 6 , __lowercase = 10 , __lowercase = 8 , __lowercase = 0.0 , __lowercase = 20_48 , __lowercase = False , __lowercase = False , __lowercase = 4 , __lowercase = 2_55 , __lowercase = 1_00 , __lowercase = 0.1 , __lowercase = 2.0 , __lowercase = 5.0 , __lowercase = 5.0 , __lowercase = 1_25_44 , __lowercase = 3.0 , __lowercase = 0.7_5 , __lowercase = 0.0_2 , __lowercase = 1.0 , __lowercase = True , __lowercase = [4, 8, 16, 32] , __lowercase = None , **__lowercase , ): if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''' ) __lowerCAmelCase = CONFIG_MAPPING['''swin''']( image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowerCamelCase__ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ): __lowerCAmelCase = backbone_config.pop('''model_type''' ) __lowerCAmelCase = CONFIG_MAPPING[backbone_model_type] __lowerCAmelCase = config_class.from_dict(lowerCamelCase__ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
""" F"""Supported model types: {",".join(self.backbones_supported )}""" ) __lowerCAmelCase = backbone_config __lowerCAmelCase = feature_size __lowerCAmelCase = mask_feature_size __lowerCAmelCase = hidden_dim __lowerCAmelCase = encoder_feedforward_dim __lowerCAmelCase = activation_function __lowerCAmelCase = encoder_layers __lowerCAmelCase = decoder_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = dropout __lowerCAmelCase = dim_feedforward __lowerCAmelCase = pre_norm __lowerCAmelCase = enforce_input_projection __lowerCAmelCase = common_stride __lowerCAmelCase = ignore_value __lowerCAmelCase = num_queries __lowerCAmelCase = no_object_weight __lowerCAmelCase = class_weight __lowerCAmelCase = mask_weight __lowerCAmelCase = dice_weight __lowerCAmelCase = train_num_points __lowerCAmelCase = oversample_ratio __lowerCAmelCase = importance_sample_ratio __lowerCAmelCase = init_std __lowerCAmelCase = init_xavier_std __lowerCAmelCase = use_auxiliary_loss __lowerCAmelCase = feature_strides __lowerCAmelCase = output_auxiliary_logits __lowerCAmelCase = decoder_layers super().__init__(**lowerCamelCase__ ) @classmethod def _snake_case (cls , __lowercase , **__lowercase ): return cls( backbone_config=lowerCamelCase__ , **lowerCamelCase__ , ) def _snake_case (self ): __lowerCAmelCase = copy.deepcopy(self.__dict__ ) __lowerCAmelCase = self.backbone_config.to_dict() __lowerCAmelCase = self.__class__.model_type return output
356
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. __lowerCAmelCase = [[1, 2, 4], [1, 2, 3, 4]] __lowerCAmelCase = DisjunctiveConstraint(__lowercase ) self.assertTrue(isinstance(dc.token_ids , __lowercase ) ) with self.assertRaises(__lowercase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__lowercase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def _snake_case (self ): # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). __lowerCAmelCase = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__lowercase ): DisjunctiveConstraint(__lowercase ) # fails here def _snake_case (self ): __lowerCAmelCase = [[1, 2, 3], [1, 2, 4]] __lowerCAmelCase = DisjunctiveConstraint(__lowercase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 ) __lowerCAmelCase = stepped is True and completed is False and reset is False self.assertTrue(__lowercase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 ) __lowerCAmelCase = stepped is True and completed is False and reset is False self.assertTrue(__lowercase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(3 ) __lowerCAmelCase = stepped is True and completed is True and reset is False self.assertTrue(__lowercase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def _snake_case (self ): __lowerCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __lowerCAmelCase = DisjunctiveConstraint(__lowercase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
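# Behaviour sketch (added): each `update(token_id)` call advances the
# constraint one token and reports `(stepped, completed, reset)`, exactly as
# the tests above exercise:
#
#     dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
#     stepped, completed, reset = dc.update(1)  # dc.current_seq == [1]
#     stepped, completed, reset = dc.update(2)  # dc.current_seq == [1, 2]
#     stepped, completed, reset = dc.update(4)  # completed is True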
9
0
'''simple docstring''' import os from pathlib import Path def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = { '''en''': '''Machine learning is great, isn\'t it?''', '''ru''': '''Машинное обучение - это здорово, не так ли?''', '''de''': '''Maschinelles Lernen ist großartig, oder?''', } # BLUE scores as follows: # "pair": [fairseq, transformers] __lowerCAmelCase = { '''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''], '''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''], '''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''], '''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''], } __lowerCAmelCase = F"""{src_lang}-{tgt_lang}""" __lowerCAmelCase = F"""\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. 
For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n""" os.makedirs(A_, exist_ok=A_) __lowerCAmelCase = os.path.join(A_, '''README.md''') print(F"""Generating {path}""") with open(A_, '''w''', encoding='''utf-8''') as f: f.write(A_) # make sure we are under the root of the project _UpperCAmelCase : Any = Path(__file__).resolve().parent.parent.parent _UpperCAmelCase : Union[str, Any] = repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: _UpperCAmelCase : Optional[int] = model_name.split("""-""") _UpperCAmelCase : Any = model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
357
'''simple docstring''' from typing import Dict, Optional import numpy as np import datasets _UpperCAmelCase : List[str] = """ IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation, the mean IoU of the image is calculated by taking the IoU of each class and averaging them. """ _UpperCAmelCase : str = """ Args: predictions (`List[ndarray]`): List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. references (`List[ndarray]`): List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. num_labels (`int`): Number of classes (categories). ignore_index (`int`): Index that will be ignored during evaluation. nan_to_num (`int`, *optional*): If specified, NaN values will be replaced by the number defined by the user. label_map (`dict`, *optional*): If specified, dictionary mapping old label indices to new label indices. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: `Dict[str, float | ndarray]` comprising various elements: - *mean_iou* (`float`): Mean Intersection-over-Union (IoU averaged over all categories). - *mean_accuracy* (`float`): Mean accuracy (averaged over all categories). - *overall_accuracy* (`float`): Overall accuracy on all images. - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`): Per category accuracy. - *per_category_iou* (`ndarray` of shape `(num_labels,)`): Per category IoU. Examples: >>> import numpy as np >>> mean_iou = datasets.load_metric(\"mean_iou\") >>> # suppose one has 3 different segmentation maps predicted >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]]) >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]]) >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]]) >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]]) >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]]) >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]]) >>> predicted = [predicted_1, predicted_2, predicted_3] >>> ground_truth = [actual_1, actual_2, actual_3] >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False) >>> print(results) # doctest: +NORMALIZE_WHITESPACE {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])} """ _UpperCAmelCase : Tuple = """\ @software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020, author = {{MMSegmentation Contributors}}, license = {Apache-2.0}, month = {7}, title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}}, url = {https://github.com/open-mmlab/mmsegmentation}, year = {2020} }""" def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False, ): if label_map is not None: for old_id, new_id in label_map.items(): __lowerCAmelCase = new_id # turn into Numpy arrays __lowerCAmelCase = np.array(lowerCamelCase) __lowerCAmelCase = np.array(lowerCamelCase) if reduce_labels: __lowerCAmelCase = 2_5_5 __lowerCAmelCase = label - 1 __lowerCAmelCase = 2_5_5 __lowerCAmelCase = label != ignore_index __lowerCAmelCase = np.not_equal(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = pred_label[mask] __lowerCAmelCase = np.array(lowerCamelCase)[mask] __lowerCAmelCase = pred_label[pred_label == label] __lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0] __lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0] __lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0] __lowerCAmelCase = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False, ): __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) for result, gt_seg_map in zip(lowerCamelCase, lowerCamelCase): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = intersect_and_union( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = False, ): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = total_intersect_and_union( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) # compute metrics __lowerCAmelCase = {} __lowerCAmelCase = total_area_intersect.sum() / total_area_label.sum() __lowerCAmelCase = total_area_intersect / total_area_union __lowerCAmelCase = total_area_intersect / total_area_label __lowerCAmelCase = np.nanmean(lowerCamelCase) __lowerCAmelCase = np.nanmean(lowerCamelCase) __lowerCAmelCase = all_acc __lowerCAmelCase = iou __lowerCAmelCase = acc if nan_to_num is not None: __lowerCAmelCase = {metric: np.nan_to_num(lowerCamelCase, nan=lowerCamelCase) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): """simple docstring""" def _snake_case (self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - 
width dim { '''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ), '''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ), } ) , reference_urls=[ '''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py''' ] , ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = False , ): __lowerCAmelCase = mean_iou( results=__lowercase , gt_seg_maps=__lowercase , num_labels=__lowercase , ignore_index=__lowercase , nan_to_num=__lowercase , label_map=__lowercase , reduce_labels=__lowercase , ) return iou_result
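# Illustrative IoU arithmetic (added; uses only numpy, already imported above).
# For a single class, IoU = |pred AND gt| / |pred OR gt| over boolean masks:
_pred = np.array([[1, 1], [0, 0]], dtype=bool)
_gt = np.array([[1, 0], [1, 0]], dtype=bool)
_iou = np.logical_and(_pred, _gt).sum() / np.logical_or(_pred, _gt).sum()
assert abs(_iou - 1 / 3) < 1e-12  # one overlapping pixel of three covered pixels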
9
0
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging _UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) if is_vision_available(): import PIL class a__ ( __A ): """simple docstring""" __UpperCamelCase : int = ['pixel_values'] def __init__(self , __lowercase = True , __lowercase = None , __lowercase = PILImageResampling.BICUBIC , __lowercase = True , __lowercase = None , __lowercase = True , __lowercase = 1 / 2_55 , __lowercase = True , __lowercase = None , __lowercase = None , __lowercase = True , **__lowercase , ): super().__init__(**__A ) __lowerCAmelCase = size if size is not None else {'''shortest_edge''': 2_24} __lowerCAmelCase = get_size_dict(__A , default_to_square=__A ) __lowerCAmelCase = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} __lowerCAmelCase = get_size_dict(__A , default_to_square=__A , param_name='''crop_size''' ) __lowerCAmelCase = do_resize __lowerCAmelCase = size __lowerCAmelCase = resample __lowerCAmelCase = do_center_crop __lowerCAmelCase = crop_size __lowerCAmelCase = do_rescale __lowerCAmelCase = rescale_factor __lowerCAmelCase = do_normalize __lowerCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __lowerCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD __lowerCAmelCase = do_convert_rgb def _snake_case (self , __lowercase , __lowercase , __lowercase = PILImageResampling.BICUBIC , __lowercase = None , **__lowercase , ): __lowerCAmelCase = get_size_dict(__A , default_to_square=__A ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) __lowerCAmelCase = get_resize_output_image_size(__A , size=size['''shortest_edge'''] , default_to_square=__A ) return resize(__A , size=__A , resample=__A , data_format=__A , **__A ) def _snake_case (self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ): __lowerCAmelCase = get_size_dict(__A ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(__A , size=(size['''height'''], size['''width''']) , data_format=__A , **__A ) def _snake_case (self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ): return rescale(__A , scale=__A , data_format=__A , **__A ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ): return normalize(__A , mean=__A , std=__A , data_format=__A , **__A ) def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ): __lowerCAmelCase = do_resize if do_resize is not None else self.do_resize __lowerCAmelCase = size if size is not None else self.size __lowerCAmelCase = get_size_dict(__A , param_name='''size''' , default_to_square=__A ) __lowerCAmelCase = resample if resample is not None else self.resample __lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __lowerCAmelCase = crop_size if crop_size is not None else self.crop_size __lowerCAmelCase = get_size_dict(__A , param_name='''crop_size''' , default_to_square=__A ) __lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize __lowerCAmelCase = image_mean if image_mean is not None else self.image_mean __lowerCAmelCase = image_std if image_std is not None else self.image_std __lowerCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowerCAmelCase = make_list_of_images(__A ) if not valid_images(__A ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowerCAmelCase = [convert_to_rgb(__A ) for image in images] # All transformations expect numpy arrays. __lowerCAmelCase = [to_numpy_array(__A ) for image in images] if do_resize: __lowerCAmelCase = [self.resize(image=__A , size=__A , resample=__A ) for image in images] if do_center_crop: __lowerCAmelCase = [self.center_crop(image=__A , size=__A ) for image in images] if do_rescale: __lowerCAmelCase = [self.rescale(image=__A , scale=__A ) for image in images] if do_normalize: __lowerCAmelCase = [self.normalize(image=__A , mean=__A , std=__A ) for image in images] __lowerCAmelCase = [to_channel_dimension_format(__A , __A ) for image in images] __lowerCAmelCase = {'''pixel_values''': images} return BatchFeature(data=__A , tensor_type=__A )
358
'''simple docstring''' import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : str = DebertaTokenizer __UpperCamelCase : str = True __UpperCamelCase : Any = DebertaTokenizerFast def _snake_case (self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __lowerCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) __lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __lowerCAmelCase = {'''unk_token''': '''[UNK]'''} __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowercase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowercase ) ) def _snake_case (self , **__lowercase ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , __lowercase ): __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = '''lower newer''' return input_text, output_text def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __lowerCAmelCase = tokenizer.tokenize(__lowercase ) self.assertListEqual(__lowercase , __lowercase ) __lowerCAmelCase = tokens + [tokenizer.unk_token] __lowerCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = tokenizer('''Hello''' , '''World''' ) __lowerCAmelCase = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , __lowercase ) @slow def _snake_case (self ): __lowerCAmelCase = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) __lowerCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowercase ) __lowerCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowercase ) __lowerCAmelCase = tokenizer.encode( '''sequence builders''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase ) __lowerCAmelCase = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def _snake_case (self ): __lowerCAmelCase = [self.tokenizer_class] if self.test_rust_tokenizer: 
tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: __lowerCAmelCase = tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) __lowerCAmelCase = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] __lowerCAmelCase = tokenizer(__lowercase , padding=__lowercase ) __lowerCAmelCase = [tokenizer.decode(__lowercase , skip_special_tokens=__lowercase ) for seq in encoding['''input_ids''']] # fmt: off __lowerCAmelCase = { '''input_ids''': [ [1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on __lowerCAmelCase = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , __lowercase ) for expected, decoded in zip(__lowercase , __lowercase ): self.assertEqual(__lowercase , __lowercase )
9
0
'''simple docstring''' import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def __magic_name__( lowerCamelCase): # picklable for multiprocessing return x.sum() def __magic_name__( lowerCamelCase): # picklable for multiprocessing return i + 1 @dataclass class a__ : """simple docstring""" __UpperCamelCase : int __UpperCamelCase : str class a__ ( UpperCAmelCase__ ): """simple docstring""" def _snake_case (self ): __lowerCAmelCase = {} __lowerCAmelCase = [] __lowerCAmelCase = 1 __lowerCAmelCase = [1, 2] __lowerCAmelCase = {"""a""": 1, """b""": 2} __lowerCAmelCase = {"""a""": [1, 2], """b""": [3, 4]} __lowerCAmelCase = {"""a""": {"""1""": 1}, """b""": 2} __lowerCAmelCase = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4} __lowerCAmelCase = {} __lowerCAmelCase = [] __lowerCAmelCase = 2 __lowerCAmelCase = [2, 3] __lowerCAmelCase = {"""a""": 2, """b""": 3} __lowerCAmelCase = {"""a""": [2, 3], """b""": [4, 5]} __lowerCAmelCase = {"""a""": {"""1""": 2}, """b""": 3} __lowerCAmelCase = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5} self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase = 2 self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase = {"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )} __lowerCAmelCase = {"""a""": 2, """b""": 0, """c""": 2} __lowerCAmelCase = { """a""": np.eye(2 ).astype(_SCREAMING_SNAKE_CASE ), """b""": np.zeros(3 ).astype(_SCREAMING_SNAKE_CASE ), 
"""c""": np.ones(2 ).astype(_SCREAMING_SNAKE_CASE ), } self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual( {k: v.tolist() for k, v in map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual( {k: v.tolist() for k, v in map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(_SCREAMING_SNAKE_CASE ): # can't pickle a local lambda map_nested(lambda __lowercase : x + 1 , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) def _snake_case (self ): __lowerCAmelCase = {"""a""": 1, """b""": 2} __lowerCAmelCase = {"""a""": 3, """b""": 4} __lowerCAmelCase = {"""a""": 5, """b""": 6} __lowerCAmelCase = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) , _SCREAMING_SNAKE_CASE ) def _snake_case (self ): class a__ : """simple docstring""" __UpperCamelCase : List[str] = 'bar' __lowerCAmelCase = Foo() self.assertEqual(foo.my_attr , '''bar''' ) with temporary_assignment(_SCREAMING_SNAKE_CASE , '''my_attr''' , '''BAR''' ): self.assertEqual(foo.my_attr , '''BAR''' ) self.assertEqual(foo.my_attr , '''bar''' ) @pytest.mark.parametrize( '''iterable_length, num_proc, expected_num_proc''', [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (1_6, 1_6, 1_6), (1_6, 1_7, 1_6), (1_7, 1_6, 1_6), ], ) def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): with patch('''datasets.utils.py_utils._single_map_nested''') as mock_single_map_nested, patch( '''datasets.parallel.parallel.Pool''') as mock_multiprocessing_pool: __lowerCAmelCase = {F"""{i}""": i for i in range(lowerCamelCase)} __lowerCAmelCase = map_nested(lambda lowerCamelCase: x + 1_0, lowerCamelCase, num_proc=lowerCamelCase, parallel_min_length=1_6) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class a__ ( UpperCAmelCase__ ): """simple docstring""" @require_tf def _snake_case (self ): import tensorflow as tf from tensorflow.keras import layers __lowerCAmelCase = layers.Dense(2 ) def gen_random_output(): __lowerCAmelCase = tf.random.uniform((1, 3) ) return model(_SCREAMING_SNAKE_CASE ).numpy() with temp_seed(42 , set_tensorflow=_SCREAMING_SNAKE_CASE ): __lowerCAmelCase = gen_random_output() with temp_seed(42 , set_tensorflow=_SCREAMING_SNAKE_CASE ): __lowerCAmelCase = gen_random_output() __lowerCAmelCase = gen_random_output() np.testing.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def _snake_case (self ): import torch def gen_random_output(): __lowerCAmelCase = torch.nn.Linear(3 , 2 ) __lowerCAmelCase = torch.rand(1 , 3 ) return model(_SCREAMING_SNAKE_CASE ).detach().numpy() with temp_seed(42 , set_pytorch=_SCREAMING_SNAKE_CASE ): 
__lowerCAmelCase = gen_random_output() with temp_seed(42 , set_pytorch=_SCREAMING_SNAKE_CASE ): __lowerCAmelCase = gen_random_output() __lowerCAmelCase = gen_random_output() np.testing.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def _snake_case (self ): def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): __lowerCAmelCase = gen_random_output() with temp_seed(42 ): __lowerCAmelCase = gen_random_output() __lowerCAmelCase = gen_random_output() np.testing.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize('''input_data''', [{}]) def __magic_name__( lowerCamelCase): __lowerCAmelCase = NestedDataStructure(lowerCamelCase).data assert output_data == input_data @pytest.mark.parametrize( '''data, expected_output''', [ ({}, []), ([], []), ('''foo''', ['''foo''']), (['''foo''', '''bar'''], ['''foo''', '''bar''']), ([['''foo''', '''bar''']], ['''foo''', '''bar''']), ([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']), ([[['''foo'''], '''bar''']], ['''foo''', '''bar''']), ({'''a''': 1, '''b''': 2}, [1, 2]), ({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]), ({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]), ({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]), ({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]), ({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]), ({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]), ], ) def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase = NestedDataStructure(lowerCamelCase).flatten() assert output == expected_output def __magic_name__( ): __lowerCAmelCase = A(x=1, y='''foobar''') __lowerCAmelCase = {"""x""": 1, """y""": """foobar"""} assert asdict(lowerCamelCase) == expected_output __lowerCAmelCase = {"""a""": {"""b""": A(x=1_0, y='''foo''')}, """c""": [A(x=2_0, y='''bar''')]} __lowerCAmelCase = {"""a""": {"""b""": {"""x""": 1_0, """y""": """foo"""}}, """c""": [{"""x""": 2_0, """y""": """bar"""}]} assert asdict(lowerCamelCase) == expected_output with pytest.raises(lowerCamelCase): asdict([1, A(x=1_0, y='''foo''')]) def __magic_name__( lowerCamelCase): return text.split() def __magic_name__( lowerCamelCase): yield (time.time(), content) time.sleep(2) yield (time.time(), content) def __magic_name__( ): with Pool(2) as pool: __lowerCAmelCase = list(iflatmap_unordered(lowerCamelCase, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 1_0)) assert out.count('''hello''') == 1_0 assert out.count('''there''') == 1_0 assert len(lowerCamelCase) == 2_0 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2) as pool: __lowerCAmelCase = list(iflatmap_unordered(lowerCamelCase, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 1_0)) assert out.count('''hello''') == 1_0 assert out.count('''there''') == 1_0 assert len(lowerCamelCase) == 2_0 # check that we get items as fast as possible with Pool(2) as pool: __lowerCAmelCase = [] for yield_time, content in iflatmap_unordered( lowerCamelCase, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}]): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" 
out.append(lowerCamelCase) assert out.count('''a''') == 2 assert out.count('''b''') == 2 assert len(lowerCamelCase) == 4
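# Behaviour sketch (added): `map_nested` maps a function through arbitrarily
# nested lists and dicts, as the tests above exercise:
#
#     from datasets.utils.py_utils import map_nested
#     map_nested(lambda x: x + 1, {"a": [1, 2], "b": 3})  # {"a": [2, 3], "b": 4}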
359
'''simple docstring'''
import argparse
import datetime


def zeller(date_input: str) -> str:
    """Find the day of the week for a date given as 'mm-dd-yyyy' or
    'mm/dd/yyyy', using Zeller's congruence."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
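# Worked example (added for clarity): 2015-03-14 fell on a Saturday, so
#     >>> zeller("03-14-2015")
#     'Your date 03-14-2015, is a Saturday!'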
9
0
'''simple docstring'''
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    """Compute the Gamma function of a positive argument by numerically
    integrating x**(num - 1) * exp(-x) from 0 to infinity."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
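# Sanity check (added): the integral reproduces factorials, Gamma(n) = (n - 1)!,
# so gamma(5) should be numerically close to 4! = 24.
if __name__ == "__main__":
    assert abs(gamma(5) - 24.0) < 1e-4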
360
'''simple docstring''' import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Optional[Any] = ConsistencyModelPipeline __UpperCamelCase : Optional[int] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS __UpperCamelCase : int = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt __UpperCamelCase : List[Any] = frozenset( [ 'num_inference_steps', 'generator', 'latents', 'output_type', 'return_dict', 'callback', 'callback_steps', ] ) @property def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' , subfolder='''test_unet''' , ) return unet @property def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , ) return unet def _snake_case (self , __lowercase=False ): if class_cond: __lowerCAmelCase = self.dummy_cond_unet else: __lowerCAmelCase = self.dummy_uncond_unet # Default to CM multistep sampler __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, } return components def _snake_case (self , __lowercase , __lowercase=0 ): if str(__lowercase ).startswith('''mps''' ): __lowerCAmelCase = torch.manual_seed(__lowercase ) else: __lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase ) __lowerCAmelCase = { '''batch_size''': 1, '''num_inference_steps''': None, '''timesteps''': [22, 0], '''generator''': generator, '''output_type''': '''np''', } return inputs def _snake_case (self ): __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase ) __lowerCAmelCase = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_dummy_inputs(__lowercase ) __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _snake_case (self ): __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components(class_cond=__lowercase ) __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase ) __lowerCAmelCase = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_dummy_inputs(__lowercase ) __lowerCAmelCase = 0 __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 
0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _snake_case (self ): __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase ) __lowerCAmelCase = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_dummy_inputs(__lowercase ) __lowerCAmelCase = 1 __lowerCAmelCase = None __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _snake_case (self ): __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components(class_cond=__lowercase ) __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase ) __lowerCAmelCase = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_dummy_inputs(__lowercase ) __lowerCAmelCase = 1 __lowerCAmelCase = None __lowerCAmelCase = 0 __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case (self , __lowercase=0 , __lowercase=False , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=(1, 3, 64, 64) ): __lowerCAmelCase = torch.manual_seed(__lowercase ) __lowerCAmelCase = { '''num_inference_steps''': None, '''timesteps''': [22, 0], '''class_labels''': 0, '''generator''': generator, '''output_type''': '''np''', } if get_fixed_latents: __lowerCAmelCase = self.get_fixed_latents(seed=__lowercase , device=__lowercase , dtype=__lowercase , shape=__lowercase ) __lowerCAmelCase = latents return inputs def _snake_case (self , __lowercase=0 , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=(1, 3, 64, 64) ): if type(__lowercase ) == str: __lowerCAmelCase = torch.device(__lowercase ) __lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase ) __lowerCAmelCase = randn_tensor(__lowercase , generator=__lowercase , device=__lowercase , dtype=__lowercase ) return latents def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase ) pipe.to(torch_device=__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_inputs() __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _snake_case (self ): 
__lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase ) pipe.to(torch_device=__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_inputs() __lowerCAmelCase = 1 __lowerCAmelCase = None __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 @require_torch_a def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase ) pipe.to(torch_device=__lowercase , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_inputs(get_fixed_latents=__lowercase , device=__lowercase ) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=__lowercase , enable_math=__lowercase , enable_mem_efficient=__lowercase ): __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @require_torch_a def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase ) pipe.to(torch_device=__lowercase , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_inputs(get_fixed_latents=__lowercase , device=__lowercase ) __lowerCAmelCase = 1 __lowerCAmelCase = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=__lowercase , enable_math=__lowercase , enable_mem_efficient=__lowercase ): __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
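# End-to-end sketch (added; mirrors the slow tests above, requires a GPU and
# downloads the `diffusers/consistency_models` weights; upstream class names):
#
#     unet = UNet2DModel.from_pretrained(
#         "diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2"
#     )
#     scheduler = CMStochasticIterativeScheduler(
#         num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0
#     )
#     pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")
#     image = pipe(timesteps=[22, 0], class_labels=0, output_type="np").images[0]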
9
0
'''simple docstring'''
def find_twos_complement(number: int) -> str:
    """Take a negative integer and return its two's complement
    representation as a binary string."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
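# Worked example (added): for -5 the magnitude needs three bits, 8 - 5 = 3
# gives '11', and the padded sign bit yields '1011', i.e. -5 in 4-bit
# two's complement.
if __name__ == "__main__":
    assert find_twos_complement(-5) == "0b1011"
    assert find_twos_complement(0) == "0b0"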
361
'''simple docstring'''
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Gives the euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classifies the point using the KNN algorithm."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
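# Quick check (added): the distance helper is plain Euclidean distance, so a
# 3-4-5 triangle makes a convenient sanity test.
if __name__ == "__main__":
    assert euclidean_distance([0, 0], [3, 4]) == 5.0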
9
0
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier


def data_handling(data: dict) -> tuple:
    # Split dataset into features and target.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )

    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
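# Quick check (added): `data_handling` simply unpacks the sklearn Bunch into
# the familiar (features, targets) pair.
if __name__ == "__main__":
    _features, _targets = data_handling(load_iris())
    assert _features.shape == (150, 4) and _targets.shape == (150,)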
362
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import OwlViTImageProcessor, OwlViTProcessor


@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    """simple docstring"""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
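# A minimal usage sketch of the processor exercised by the tests above. The checkpoint
# name matches the one used in the tests; running this requires downloading the model
# files, so it is left commented out and should be treated as illustrative only.
#
# from PIL import Image
# import numpy as np
# from transformers import OwlViTProcessor
#
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
# inputs = processor(text=[["cat", "nasa badge"]], images=image, return_tensors="np")
# print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']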
9
0
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
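# A short usage sketch (names as defined above): for the sample collections,
# |A ∩ B| / |A ∪ B| = 3 / 8 = 0.375, and with alternative_union=True the
# denominator becomes len(A) + len(B) = 11 instead of the true union size.
example_a = ["a", "b", "c", "d", "e"]
example_b = ["c", "d", "e", "f", "h", "i"]
assert jaccard_similarity(set(example_a), set(example_b)) == 3 / 8
assert jaccard_similarity(example_a, example_b) == 3 / 8
assert jaccard_similarity(example_a, example_b, alternative_union=True) == 3 / 11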
363
'''simple docstring'''
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset():
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr, target):
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr, target):
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times():
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"""The time for naive implementation is {times[0]}.""")
    print(f"""The time for optimized implementation is {times[1]}.""")
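# A minimal sanity check for the two implementations above (names taken from this
# file): on a fixed array both should find the same sorted triplet summing to 35.
# Note that triplet_sum2 sorts its input in place, so a copy is passed.
example = ([13, 29, 7, 23, 5], 35)
assert triplet_sum1(*example) == (5, 7, 23)
assert triplet_sum2(list(example[0]), example[1]) == (5, 7, 23)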
9
0
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging


if is_torch_available():
    import torch

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}


class GPTSw3Tokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", "„"}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False):
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids) -> str:
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
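# Illustration (a sketch, not part of the original file) of the chat prompt string
# assembled by _build_conversation_input_ids above, using the default special tokens:
# eos + bos, turns joined by bos, and a trailing "Bot:" cue for generation.
def _example_chat_prompt(turns, eos_token="<|endoftext|>", bos_token="<s>"):
    texts = [f"User: {t}" if is_user else f"Bot: {t}" for is_user, t in turns]
    return f"{eos_token}{bos_token}" + bos_token.join(texts) + f"{bos_token}Bot:"


assert _example_chat_prompt([(True, "Hej!")]) == "<|endoftext|><s>User: Hej!<s>Bot:"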
364
'''simple docstring'''
import numpy as np


def power_iteration(input_matrix, vector, error_tol=1e-12, max_iterations=100):
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.

    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.

        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
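# A quick worked example (names as defined above): for the diagonal matrix
# diag(2, 1) the dominant eigenvalue is 2, and the Rayleigh quotient converges
# to it geometrically, since the component along the second axis decays by 1/2
# per iteration.
_m = np.array([[2.0, 0.0], [0.0, 1.0]])
_v = np.array([1.0, 1.0])
_eigenvalue, _eigenvector = power_iteration(_m, _v)
assert abs(_eigenvalue - 2.0) < 1e-6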
9
0
'''simple docstring'''
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
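# A small demonstration (not part of the original script) of what `_re_checkpoint`
# extracts from a config docstring: the checkpoint name and its hub link, as a pair.
_example_doc = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
assert _re_checkpoint.findall(_example_doc) == [
    ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
]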
365
'''simple docstring'''
from typing import Dict, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends


if is_vision_available():
    import PIL

# soft dependency
if is_pytesseract_available():
    import pytesseract

logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image, lang, tesseract_config=None):
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


class LayoutLMv2ImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
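# A worked example (names as defined above): `normalize_box` rescales pixel
# coordinates into the 0-1000 range that LayoutLM-style models expect, relative
# to the image width and height.
assert normalize_box([50, 20, 150, 80], width=200, height=100) == [250, 200, 750, 800]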
9
0
def cramers_rule_2x2(equation1, equation2):
    if not (len(equation1) == len(equation2) == 3):
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
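# Usage sketch (name as defined above): each equation is given as [a, b, c] for
# ax + by = c. Solving x + 2y = 3 and 4x + 5y = 6 gives (x, y) = (-1, 2), since
# D = 1*5 - 4*2 = -3, Dx = 3*5 - 6*2 = 3, Dy = 1*6 - 4*3 = -6.
assert cramers_rule_2x2([1, 2, 3], [4, 5, 6]) == (-1.0, 2.0)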
366
'''simple docstring'''
from ..utils import DummyObject, requires_backends


class LMSDiscreteScheduler(metaclass=DummyObject):
    """simple docstring"""

    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
9
0
'''simple docstring'''
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("""\nTesting gradient descent for a linear hypothesis function.\n""")
    test_gradient_descent()
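# A hand check of the hypothesis form used above (a local sketch, so it does not
# depend on the module-level parameter_vector, which gradient descent mutates):
# with parameters [2, 4, 1, 5] the prediction for input (5, 2, 3) is
# 2 + 4*5 + 1*2 + 5*3 = 39.
def _example_hypothesis(params, x):
    return params[0] + sum(p * xi for p, xi in zip(params[1:], x))


assert _example_hypothesis([2, 4, 1, 5], (5, 2, 3)) == 39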
367
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor


class BridgeTowerImageProcessingTester(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
9
0
'''simple docstring'''
import argparse
import os

import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder


MODEL = "base_with_context"


def load_notes_encoder(weights, model):
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model


def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )

    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]

        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model


def load_decoder(weights, model):
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))

    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )

    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )

        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
    return model


def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
    parser.add_argument(
        """--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
    )
    parser.add_argument(
        """--checkpoint_path""",
        default=f"""{MODEL}/checkpoint_500000""",
        type=str,
        required=False,
        help="""Path to the original jax model checkpoint.""",
    )
    args = parser.parse_args()

    main(args)
368
'''simple docstring'''
# Imports
import numpy as np


class IndexCalculation:
    """simple docstring"""

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arvaa,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }

        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arvaa(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, a=0.08, b=1.22, x=0.03):
        return a * (
            (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
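# Quick usage sketch (class name as defined above): NDVI = (NIR - red) / (NIR + red),
# computed element-wise on the supplied band arrays.
_nir = np.array([0.8, 0.6])
_red = np.array([0.2, 0.3])
_calc = IndexCalculation(red=_red, nir=_nir)
assert np.allclose(_calc.ndvi(), [0.6, 1 / 3])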
9
0
'''simple docstring''' import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class a__ ( _lowerCAmelCase ): """simple docstring""" __UpperCamelCase : Optional[int] = (DDPMParallelScheduler,) def _snake_case (self , **__lowercase ): __lowerCAmelCase = { '''num_train_timesteps''': 10_00, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**_lowercase ) return config def _snake_case (self ): for timesteps in [1, 5, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=_lowercase ) def _snake_case (self ): for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ): self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase ) def _snake_case (self ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_lowercase ) def _snake_case (self ): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_lowercase ) def _snake_case (self ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=_lowercase ) def _snake_case (self ): self.check_over_configs(thresholding=_lowercase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , ) def _snake_case (self ): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_lowercase ) def _snake_case (self ): for t in [0, 5_00, 9_99]: self.check_over_forward(time_step=_lowercase ) def _snake_case (self ): __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**_lowercase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0_9_7_9 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.0_2 ) ) < 1e-5 def _snake_case (self ): __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**_lowercase ) __lowerCAmelCase = len(_lowercase ) __lowerCAmelCase = self.dummy_model() __lowerCAmelCase = self.dummy_sample_deter __lowerCAmelCase = self.dummy_sample_deter + 0.1 __lowerCAmelCase = self.dummy_sample_deter - 0.1 __lowerCAmelCase = samplea.shape[0] __lowerCAmelCase = torch.stack([samplea, samplea, samplea] , dim=0 ) __lowerCAmelCase = torch.arange(_lowercase )[0:3, None].repeat(1 , _lowercase ) __lowerCAmelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) __lowerCAmelCase = scheduler.batch_step_no_noise(_lowercase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) __lowerCAmelCase = torch.sum(torch.abs(_lowercase ) ) __lowerCAmelCase = torch.mean(torch.abs(_lowercase ) ) assert abs(result_sum.item() - 11_53.18_33 ) < 1e-2 assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3 def _snake_case (self ): __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**_lowercase ) __lowerCAmelCase = len(_lowercase ) __lowerCAmelCase = self.dummy_model() __lowerCAmelCase = self.dummy_sample_deter __lowerCAmelCase = torch.manual_seed(0 ) for t in reversed(range(_lowercase ) ): # 1. predict noise residual __lowerCAmelCase = model(_lowercase , _lowercase ) # 2. 
predict previous mean of sample x_t-1 __lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample __lowerCAmelCase = pred_prev_sample __lowerCAmelCase = torch.sum(torch.abs(_lowercase ) ) __lowerCAmelCase = torch.mean(torch.abs(_lowercase ) ) assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2 assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3 def _snake_case (self ): __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' ) __lowerCAmelCase = scheduler_class(**_lowercase ) __lowerCAmelCase = len(_lowercase ) __lowerCAmelCase = self.dummy_model() __lowerCAmelCase = self.dummy_sample_deter __lowerCAmelCase = torch.manual_seed(0 ) for t in reversed(range(_lowercase ) ): # 1. predict noise residual __lowerCAmelCase = model(_lowercase , _lowercase ) # 2. predict previous mean of sample x_t-1 __lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample __lowerCAmelCase = pred_prev_sample __lowerCAmelCase = torch.sum(torch.abs(_lowercase ) ) __lowerCAmelCase = torch.mean(torch.abs(_lowercase ) ) assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2 assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3 def _snake_case (self ): __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**_lowercase ) __lowerCAmelCase = [1_00, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_lowercase ) __lowerCAmelCase = scheduler.timesteps for i, timestep in enumerate(_lowercase ): if i == len(_lowercase ) - 1: __lowerCAmelCase = -1 else: __lowerCAmelCase = timesteps[i + 1] __lowerCAmelCase = scheduler.previous_timestep(_lowercase ) __lowerCAmelCase = prev_t.item() self.assertEqual(_lowercase , _lowercase ) def _snake_case (self ): __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**_lowercase ) __lowerCAmelCase = [1_00, 87, 50, 51, 0] with self.assertRaises(_lowercase , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=_lowercase ) def _snake_case (self ): __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**_lowercase ) __lowerCAmelCase = [1_00, 87, 50, 1, 0] __lowerCAmelCase = len(_lowercase ) with self.assertRaises(_lowercase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=_lowercase , timesteps=_lowercase ) def _snake_case (self ): __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**_lowercase ) __lowerCAmelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( _lowercase , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=_lowercase )
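# A minimal sketch of the reverse-diffusion loop the tests above exercise,
# assuming the same diffusers API they import (DDPMParallelScheduler). The
# zero tensor stands in for a trained model's noise prediction.
import torch
from diffusers import DDPMParallelScheduler

scheduler = DDPMParallelScheduler(
    num_train_timesteps=1000,
    beta_start=0.0001,
    beta_end=0.02,
    beta_schedule="linear",
    variance_type="fixed_small",
    clip_sample=True,
)
sample = torch.randn(1, 3, 8, 8, generator=torch.manual_seed(0))
generator = torch.manual_seed(0)
for t in reversed(range(scheduler.config.num_train_timesteps)):
    model_output = torch.zeros_like(sample)  # stand-in noise prediction
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample
print(float(sample.abs().mean()))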
'''simple docstring''' from math import sqrt def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and ( number >= 0 ), "'number' must been an int and positive" __lowerCAmelCase = True # 0 and 1 are none primes. if number <= 1: __lowerCAmelCase = False for divisor in range(2, int(round(sqrt(lowerCamelCase))) + 1): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: __lowerCAmelCase = False break # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'status' must been from type bool" return status def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N __lowerCAmelCase = list(range(2, n + 1)) __lowerCAmelCase = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(lowerCamelCase)): for j in range(i + 1, len(lowerCamelCase)): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): __lowerCAmelCase = 0 # filters actual prime numbers. __lowerCAmelCase = [x for x in begin_list if x != 0] # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type list" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n > 2), "'N' must been an int and > 2" __lowerCAmelCase = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2, n + 1): if is_prime(lowerCamelCase): ans.append(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type list" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and number >= 0, "'number' must been an int and >= 0" __lowerCAmelCase = [] # this list will be returns of the function. # potential prime number factors. 
__lowerCAmelCase = 2 __lowerCAmelCase = number if number == 0 or number == 1: ans.append(lowerCamelCase) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(lowerCamelCase): while quotient != 1: if is_prime(lowerCamelCase) and (quotient % factor == 0): ans.append(lowerCamelCase) quotient /= factor else: factor += 1 else: ans.append(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type list" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and ( number >= 0 ), "'number' bust been an int and >= 0" __lowerCAmelCase = 0 # prime factorization of 'number' __lowerCAmelCase = prime_factorization(lowerCamelCase) __lowerCAmelCase = max(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type int" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and ( number >= 0 ), "'number' bust been an int and >= 0" __lowerCAmelCase = 0 # prime factorization of 'number' __lowerCAmelCase = prime_factorization(lowerCamelCase) __lowerCAmelCase = min(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type int" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase), "'number' must been an int" assert isinstance(number % 2 == 0, lowerCamelCase), "compare bust been from type bool" return number % 2 == 0 def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase), "'number' must been an int" assert isinstance(number % 2 != 0, lowerCamelCase), "compare bust been from type bool" return number % 2 != 0 def __magic_name__( lowerCamelCase): assert ( isinstance(lowerCamelCase, lowerCamelCase) and (number > 2) and is_even(lowerCamelCase) ), "'number' must been an int, even and > 2" __lowerCAmelCase = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' __lowerCAmelCase = get_prime_numbers(lowerCamelCase) __lowerCAmelCase = len(lowerCamelCase) # run variable for while-loops. __lowerCAmelCase = 0 __lowerCAmelCase = None # exit variable. for break up the loops __lowerCAmelCase = True while i < len_pn and loop: __lowerCAmelCase = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: __lowerCAmelCase = False ans.append(prime_numbers[i]) ans.append(prime_numbers[j]) j += 1 i += 1 # precondition assert ( isinstance(lowerCamelCase, lowerCamelCase) and (len(lowerCamelCase) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0]) and is_prime(ans[1]) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( isinstance(lowerCamelCase, lowerCamelCase) and isinstance(lowerCamelCase, lowerCamelCase) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." __lowerCAmelCase = 0 while numbera != 0: __lowerCAmelCase = numbera % numbera __lowerCAmelCase = numbera __lowerCAmelCase = rest # precondition assert isinstance(lowerCamelCase, lowerCamelCase) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( isinstance(lowerCamelCase, lowerCamelCase) and isinstance(lowerCamelCase, lowerCamelCase) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." 
__lowerCAmelCase = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' __lowerCAmelCase = prime_factorization(lowerCamelCase) __lowerCAmelCase = prime_factorization(lowerCamelCase) elif numbera == 1 or numbera == 1: __lowerCAmelCase = [] __lowerCAmelCase = [] __lowerCAmelCase = max(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = 0 __lowerCAmelCase = 0 __lowerCAmelCase = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: __lowerCAmelCase = prime_fac_a.count(lowerCamelCase) __lowerCAmelCase = prime_fac_a.count(lowerCamelCase) for _ in range(max(lowerCamelCase, lowerCamelCase)): ans *= n else: __lowerCAmelCase = prime_fac_a.count(lowerCamelCase) for _ in range(lowerCamelCase): ans *= n done.append(lowerCamelCase) # iterates through primeFac2 for n in prime_fac_a: if n not in done: __lowerCAmelCase = prime_fac_a.count(lowerCamelCase) for _ in range(lowerCamelCase): ans *= n done.append(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'number' must been a positive int" __lowerCAmelCase = 0 __lowerCAmelCase = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(lowerCamelCase): ans += 1 # precondition assert isinstance(lowerCamelCase, lowerCamelCase) and is_prime( lowerCamelCase), "'ans' must been a prime number and from type int" return ans def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( is_prime(lowerCamelCase) and is_prime(lowerCamelCase) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" __lowerCAmelCase = p_number_a + 1 # jump to the next number __lowerCAmelCase = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(lowerCamelCase): number += 1 while number < p_number_a: ans.append(lowerCamelCase) number += 1 # fetch the next prime number. while not is_prime(lowerCamelCase): number += 1 # precondition assert ( isinstance(lowerCamelCase, lowerCamelCase) and ans[0] != p_number_a and ans[len(lowerCamelCase) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 1), "'n' must been int and >= 1" __lowerCAmelCase = [] # will be returned. 
for divisor in range(1, n + 1): if n % divisor == 0: ans.append(lowerCamelCase) # precondition assert ans[0] == 1 and ans[len(lowerCamelCase) - 1] == n, "Error in function getDivisiors(...)" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and ( number > 1 ), "'number' must been an int and >= 1" __lowerCAmelCase = get_divisors(lowerCamelCase) # precondition assert ( isinstance(lowerCamelCase, lowerCamelCase) and (divisors[0] == 1) and (divisors[len(lowerCamelCase) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1]) == number def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( isinstance(lowerCamelCase, lowerCamelCase) and isinstance(lowerCamelCase, lowerCamelCase) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. __lowerCAmelCase = gcd(abs(lowerCamelCase), abs(lowerCamelCase)) # precondition assert ( isinstance(lowerCamelCase, lowerCamelCase) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'n' must been a int and >= 0" __lowerCAmelCase = 1 # this will be return. for factor in range(1, n + 1): ans *= factor return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'n' must been an int and >= 0" __lowerCAmelCase = 0 __lowerCAmelCase = 1 __lowerCAmelCase = 1 # this will be return for _ in range(n - 1): __lowerCAmelCase = ans ans += fiba __lowerCAmelCase = tmp return ans
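# An independently written sketch of two helpers from the module above. It
# uses floor division (//=) so the running quotient stays an int; the
# original's `quotient /= factor` silently switches to floats.
from math import isqrt

def is_prime_sketch(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d != 0 for d in range(2, isqrt(n) + 1))

def prime_factorization_sketch(n: int) -> list:
    factors, d = [], 2
    while d * d <= n:
        while n % d == 0:  # strip each prime factor completely
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors or [n]  # mirror the module's convention for 0 and 1

assert prime_factorization_sketch(360) == [2, 2, 2, 3, 3, 5]
assert is_prime_sketch(997)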
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)

_UpperCAmelCase : str = {
    """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
    """processing_trocr""": ["""TrOCRProcessor"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCAmelCase : Tuple = [
        """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TrOCRForCausalLM""",
        """TrOCRPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    _UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
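# A self-contained sketch of the lazy-import idea behind `_LazyModule` above
# (not transformers' actual implementation, just the core mechanism):
# attributes resolve to real imports only on first access, then get cached.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so __getattr__ fires only once
        return value

lazy = LazyModuleSketch("demo", {"sqrt": "math", "dumps": "json"})
print(lazy.sqrt(9.0), lazy.dumps({"ok": True}))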
'''simple docstring''' import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed _UpperCAmelCase : Dict = """true""" def __magic_name__( lowerCamelCase, lowerCamelCase=8_2, lowerCamelCase=1_6): set_seed(4_2) __lowerCAmelCase = RegressionModel() __lowerCAmelCase = deepcopy(lowerCamelCase) __lowerCAmelCase = RegressionDataset(length=lowerCamelCase) __lowerCAmelCase = DataLoader(lowerCamelCase, batch_size=lowerCamelCase) model.to(accelerator.device) __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowerCamelCase, lowerCamelCase) return model, ddp_model, dataloader def __magic_name__( lowerCamelCase, lowerCamelCase=False): __lowerCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''') __lowerCAmelCase = load_dataset('''glue''', '''mrpc''', split='''validation''') def tokenize_function(lowerCamelCase): __lowerCAmelCase = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=lowerCamelCase, max_length=lowerCamelCase) return outputs with accelerator.main_process_first(): __lowerCAmelCase = dataset.map( lowerCamelCase, batched=lowerCamelCase, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], ) __lowerCAmelCase = tokenized_datasets.rename_column('''label''', '''labels''') def collate_fn(lowerCamelCase): if use_longest: return tokenizer.pad(lowerCamelCase, padding='''longest''', return_tensors='''pt''') return tokenizer.pad(lowerCamelCase, padding='''max_length''', max_length=1_2_8, return_tensors='''pt''') return DataLoader(lowerCamelCase, shuffle=lowerCamelCase, collate_fn=lowerCamelCase, batch_size=1_6) def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase = Accelerator(dispatch_batches=lowerCamelCase, split_batches=lowerCamelCase) __lowerCAmelCase = get_dataloader(lowerCamelCase, not dispatch_batches) __lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained( '''hf-internal-testing/mrpc-bert-base-cased''', return_dict=lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowerCamelCase, lowerCamelCase) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = [] for batch in dataloader: __lowerCAmelCase , __lowerCAmelCase = batch.values() with torch.no_grad(): __lowerCAmelCase = model(lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((logit, target)) logits_and_targets.append((logit, target)) __lowerCAmelCase , __lowerCAmelCase = [], [] for logit, targ in logits_and_targets: logits.append(lowerCamelCase) targs.append(lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = torch.cat(lowerCamelCase), torch.cat(lowerCamelCase) return logits, targs def __magic_name__( lowerCamelCase, lowerCamelCase=8_2, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=1_6): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = get_basic_setup(lowerCamelCase, lowerCamelCase, lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = generate_predictions(lowerCamelCase, lowerCamelCase, lowerCamelCase) assert ( len(lowerCamelCase) == num_samples ), 
F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowerCamelCase)}""" def __magic_name__( lowerCamelCase = False, lowerCamelCase = False): __lowerCAmelCase = evaluate.load('''glue''', '''mrpc''') __lowerCAmelCase , __lowerCAmelCase = get_mrpc_setup(lowerCamelCase, lowerCamelCase) # First do baseline __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup['''no'''] model.to(lowerCamelCase) model.eval() for batch in dataloader: batch.to(lowerCamelCase) with torch.inference_mode(): __lowerCAmelCase = model(**lowerCamelCase) __lowerCAmelCase = outputs.logits.argmax(dim=-1) metric.add_batch(predictions=lowerCamelCase, references=batch['''labels''']) __lowerCAmelCase = metric.compute() # Then do distributed __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup['''ddp'''] model.eval() for batch in dataloader: with torch.inference_mode(): __lowerCAmelCase = model(**lowerCamelCase) __lowerCAmelCase = outputs.logits.argmax(dim=-1) __lowerCAmelCase = batch['''labels'''] __lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((preds, references)) metric.add_batch(predictions=lowerCamelCase, references=lowerCamelCase) __lowerCAmelCase = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key], distributed[key]), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n""" def __magic_name__( ): __lowerCAmelCase = Accelerator(split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('''**Testing gather_for_metrics**''') for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""") test_mrpc(lowerCamelCase, lowerCamelCase) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test torch metrics**''') for split_batches in [True, False]: for dispatch_batches in [True, False]: __lowerCAmelCase = Accelerator(split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase) if accelerator.is_local_main_process: print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""") test_torch_metrics(lowerCamelCase, 9_9) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test last batch is not dropped when perfectly divisible**''') __lowerCAmelCase = Accelerator() test_torch_metrics(lowerCamelCase, 5_1_2) accelerator.state._reset_state() def __magic_name__( lowerCamelCase): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
'''simple docstring''' import mpmath # for roots of unity import numpy as np class a__ : """simple docstring""" def __init__(self , __lowercase=None , __lowercase=None ): __lowerCAmelCase = list(poly_a or [0] )[:] __lowerCAmelCase = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() __lowerCAmelCase = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() __lowerCAmelCase = len(self.polyB ) # Add 0 to make lengths equal a power of 2 __lowerCAmelCase = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform __lowerCAmelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product __lowerCAmelCase = self.__multiply() def _snake_case (self , __lowercase ): __lowerCAmelCase = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB] # Corner case if len(_A ) <= 1: return dft[0] # __lowerCAmelCase = self.c_max_length // 2 while next_ncol > 0: __lowerCAmelCase = [[] for i in range(_A )] __lowerCAmelCase = self.root**next_ncol # First half of next step __lowerCAmelCase = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(_A ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step __lowerCAmelCase = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(_A ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update __lowerCAmelCase = new_dft __lowerCAmelCase = next_ncol // 2 return dft[0] def _snake_case (self ): __lowerCAmelCase = self.__dft('''A''' ) __lowerCAmelCase = self.__dft('''B''' ) __lowerCAmelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT __lowerCAmelCase = 2 while next_ncol <= self.c_max_length: __lowerCAmelCase = [[] for i in range(_A )] __lowerCAmelCase = self.root ** (next_ncol // 2) __lowerCAmelCase = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update __lowerCAmelCase = new_inverse_c next_ncol *= 2 # Unpack __lowerCAmelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__(self ): __lowerCAmelCase = 'A = ' + ' + '.join( F"""{coef}*x^{i}""" for coef, i in enumerate(self.polyA[: self.len_A] ) ) __lowerCAmelCase = 'B = ' + ' + '.join( F"""{coef}*x^{i}""" for coef, i in enumerate(self.polyB[: self.len_B] ) ) __lowerCAmelCase = 'A*B = ' + ' + '.join( F"""{coef}*x^{i}""" for coef, i in enumerate(self.product ) ) return F"""{a}\n{b}\n{c}""" # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
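# An independent cross-check for the class above: multiply the same kind of
# polynomials with numpy's FFT (pad to a power of two, multiply pointwise,
# inverse-transform) and read off the coefficients.
import numpy as np

a = [1, 2, 3]  # 1 + 2x + 3x^2
b = [4, 5]     # 4 + 5x
n = len(a) + len(b) - 1
fft_len = 1 << (n - 1).bit_length()  # next power of two >= n
prod = np.fft.ifft(np.fft.fft(a, fft_len) * np.fft.fft(b, fft_len)).real
print(np.round(prod[:n], 8))  # [ 4. 13. 22. 15.]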
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

_UpperCAmelCase : str = logging.get_logger(__name__)

_UpperCAmelCase : str = {
    """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
    """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
    """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
    """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
    """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
    """roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}

class a__ ( __A ):
    """simple docstring"""

    __UpperCamelCase : str = 'roberta'

    def __init__(self , __lowercase=5_02_65 , __lowercase=7_68 , __lowercase=12 , __lowercase=12 , __lowercase=30_72 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_12 , __lowercase=2 , __lowercase=0.0_2 , __lowercase=1e-12 , __lowercase=1 , __lowercase=0 , __lowercase=2 , __lowercase="absolute" , __lowercase=True , __lowercase=None , **__lowercase , ):
        super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = num_hidden_layers
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = hidden_act
        __lowerCAmelCase = intermediate_size
        __lowerCAmelCase = hidden_dropout_prob
        __lowerCAmelCase = attention_probs_dropout_prob
        __lowerCAmelCase = max_position_embeddings
        __lowerCAmelCase = type_vocab_size
        __lowerCAmelCase = initializer_range
        __lowerCAmelCase = layer_norm_eps
        __lowerCAmelCase = position_embedding_type
        __lowerCAmelCase = use_cache
        __lowerCAmelCase = classifier_dropout

class a__ ( __A ):
    """simple docstring"""

    @property
    def _snake_case (self ):
        if self.task == "multiple-choice":
            __lowerCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            __lowerCAmelCase = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ]
        )
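# A hedged usage sketch for the config class defined above, via its public
# transformers name (assumes a standard transformers install):
from transformers import RobertaConfig

config = RobertaConfig(
    vocab_size=1000,
    hidden_size=64,
    num_hidden_layers=2,
    num_attention_heads=2,
    intermediate_size=128,
)
print(config.model_type, config.position_embedding_type)  # roberta absolute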
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class a__ : """simple docstring""" def __init__(self , __lowercase , __lowercase=13 , __lowercase=10 , __lowercase=3 , __lowercase=2 , __lowercase=2 , __lowercase=2 , __lowercase=True , __lowercase=True , __lowercase=32 , __lowercase=5 , __lowercase=4 , __lowercase=37 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=10 , __lowercase=0.0_2 , __lowercase=0.9 , __lowercase=None , ): __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = image_size __lowerCAmelCase = num_channels __lowerCAmelCase = patch_size __lowerCAmelCase = tubelet_size __lowerCAmelCase = num_frames __lowerCAmelCase = is_training __lowerCAmelCase = use_labels __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = type_sequence_label_size __lowerCAmelCase = initializer_range __lowerCAmelCase = mask_ratio __lowerCAmelCase = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame __lowerCAmelCase = (image_size // patch_size) ** 2 __lowerCAmelCase = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos __lowerCAmelCase = int(mask_ratio * self.seq_length ) def _snake_case (self ): __lowerCAmelCase = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __lowerCAmelCase = None if self.use_labels: __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase = self.get_config() return config, pixel_values, labels def _snake_case (self ): return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowercase , initializer_range=self.initializer_range , ) def _snake_case (self , __lowercase , __lowercase , __lowercase ): __lowerCAmelCase = VideoMAEModel(config=__lowercase ) model.to(__lowercase ) model.eval() __lowerCAmelCase = model(__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def _snake_case (self , __lowercase , __lowercase , __lowercase ): __lowerCAmelCase = VideoMAEForPreTraining(__lowercase ) model.to(__lowercase ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch __lowerCAmelCase = torch.ones((self.num_masks,) ) __lowerCAmelCase = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) __lowerCAmelCase = mask.expand(self.batch_size , -1 ).bool() __lowerCAmelCase = model(__lowercase , __lowercase ) # model only returns predictions for masked patches __lowerCAmelCase = mask.sum().item() __lowerCAmelCase = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def _snake_case (self ): __lowerCAmelCase = self.prepare_config_and_inputs() __lowerCAmelCase = config_and_inputs __lowerCAmelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" __UpperCamelCase : str = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) __UpperCamelCase : List[str] = ( {'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification} if is_torch_available() else {} ) __UpperCamelCase : str = False __UpperCamelCase : Any = False __UpperCamelCase : Union[str, Any] = False __UpperCamelCase : int = False def _snake_case (self ): __lowerCAmelCase = VideoMAEModelTester(self ) __lowerCAmelCase = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 ) def _snake_case (self , __lowercase , __lowercase , __lowercase=False ): __lowerCAmelCase = copy.deepcopy(__lowercase ) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch __lowerCAmelCase = torch.ones((self.model_tester.num_masks,) ) __lowerCAmelCase = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) __lowerCAmelCase = mask.expand(self.model_tester.batch_size , -1 ).bool() __lowerCAmelCase = bool_masked_pos.to(__lowercase ) if return_labels: if model_class in [ *get_values(__lowercase ), ]: __lowerCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowercase ) return inputs_dict def _snake_case (self ): self.config_tester.run_common_tests() @unittest.skip(reason='''VideoMAE does not use inputs_embeds''' ) def _snake_case (self ): pass def _snake_case (self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(__lowercase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) ) def _snake_case (self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(__lowercase ) __lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase = [*signature.parameters.keys()] __lowerCAmelCase = ["""pixel_values"""] 
self.assertListEqual(arg_names[:1] , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def _snake_case (self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowercase ) @slow def _snake_case (self ): for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase = VideoMAEModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def _snake_case (self ): if not self.has_attentions: pass else: __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase = True for model_class in self.all_model_classes: __lowerCAmelCase = self.model_tester.seq_length - self.model_tester.num_masks __lowerCAmelCase = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) __lowerCAmelCase = True __lowerCAmelCase = False __lowerCAmelCase = True __lowerCAmelCase = model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): __lowerCAmelCase = model(**self._prepare_for_class(__lowercase , __lowercase ) ) __lowerCAmelCase = outputs.attentions self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __lowerCAmelCase = True __lowerCAmelCase = model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): __lowerCAmelCase = model(**self._prepare_for_class(__lowercase , __lowercase ) ) __lowerCAmelCase = outputs.attentions self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) __lowerCAmelCase = len(__lowercase ) # Check attention is always last and order is fine __lowerCAmelCase = True __lowerCAmelCase = True __lowerCAmelCase = model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): __lowerCAmelCase = model(**self._prepare_for_class(__lowercase , __lowercase ) ) self.assertEqual(out_len + 1 , len(__lowercase ) ) __lowerCAmelCase = outputs.attentions self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def _snake_case (self ): def check_hidden_states_output(__lowercase , __lowercase , __lowercase ): __lowerCAmelCase = model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): __lowerCAmelCase = model(**self._prepare_for_class(__lowercase , __lowercase ) ) __lowerCAmelCase = outputs.hidden_states __lowerCAmelCase = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__lowercase ) , __lowercase ) __lowerCAmelCase = self.model_tester.seq_length - self.model_tester.num_masks __lowerCAmelCase = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCAmelCase = True 
check_hidden_states_output(__lowercase , __lowercase , __lowercase ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def _snake_case (self ): pass def __magic_name__( ): __lowerCAmelCase = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''', filename='''eating_spaghetti.npy''', repo_type='''dataset''') __lowerCAmelCase = np.load(A__) return list(A__) @require_torch @require_vision class a__ ( unittest.TestCase ): """simple docstring""" @cached_property def _snake_case (self ): # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def _snake_case (self ): __lowerCAmelCase = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to( __lowercase ) __lowerCAmelCase = self.default_image_processor __lowerCAmelCase = prepare_video() __lowerCAmelCase = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase ) # forward pass with torch.no_grad(): __lowerCAmelCase = model(**__lowercase ) # verify the logits __lowerCAmelCase = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , __lowercase ) __lowerCAmelCase = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 ) ) @slow def _snake_case (self ): __lowerCAmelCase = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(__lowercase ) __lowerCAmelCase = self.default_image_processor __lowerCAmelCase = prepare_video() __lowerCAmelCase = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase ) # add boolean mask, indicating which patches to mask __lowerCAmelCase = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' ) __lowerCAmelCase = torch.load(__lowercase ) # forward pass with torch.no_grad(): __lowerCAmelCase = model(**__lowercase ) # verify the logits __lowerCAmelCase = torch.Size([1, 14_08, 15_36] ) __lowerCAmelCase = torch.tensor( [[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=__lowercase ) self.assertEqual(outputs.logits.shape , __lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __lowercase , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `True`) __lowerCAmelCase = torch.tensor([0.5_1_4_2] , device=__lowercase ) self.assertTrue(torch.allclose(outputs.loss , __lowercase , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) __lowerCAmelCase = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=__lowercase ).to( __lowercase ) with torch.no_grad(): __lowerCAmelCase = model(**__lowercase ) __lowerCAmelCase = torch.tensor(torch.tensor([0.6_4_6_9] ) , device=__lowercase ) self.assertTrue(torch.allclose(outputs.loss , __lowercase , atol=1e-4 ) )
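# A sketch of the masking trick the tests above rely on: every clip must mask
# the same number of patches, so a single boolean mask is built once and
# repeated across the batch (the shapes here are toy values).
import torch

batch_size, seq_length, num_masks = 2, 12, 9
mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
bool_masked_pos = mask.expand(batch_size, -1).bool()
print(bool_masked_pos.shape, bool_masked_pos.sum(dim=1))  # each row masks 9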
'''simple docstring''' import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase = old_name if "patch_embed" in old_name: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = old_name.split('''.''') if layer == "0": __lowerCAmelCase = old_name.replace('''0''', '''convolution1''') elif layer == "1": __lowerCAmelCase = old_name.replace('''1''', '''batchnorm_before''') elif layer == "3": __lowerCAmelCase = old_name.replace('''3''', '''convolution2''') else: __lowerCAmelCase = old_name.replace('''4''', '''batchnorm_after''') if "network" in old_name and re.search(r'''\d\.\d''', lowerCamelCase): __lowerCAmelCase = r'''\b\d{2}\b''' if bool(re.search(lowerCamelCase, lowerCamelCase)): __lowerCAmelCase = re.search(r'''\d\.\d\d.''', lowerCamelCase).group() else: __lowerCAmelCase = re.search(r'''\d\.\d.''', lowerCamelCase).group() if int(match[0]) < 6: __lowerCAmelCase = old_name.replace(lowerCamelCase, '''''') __lowerCAmelCase = trimmed_name.replace('''network''', match[0] + '''.meta4D_layers.blocks.''' + match[2:-1]) __lowerCAmelCase = '''intermediate_stages.''' + trimmed_name else: __lowerCAmelCase = old_name.replace(lowerCamelCase, '''''') if int(match[2]) < num_meta4D_last_stage: __lowerCAmelCase = trimmed_name.replace('''network''', '''meta4D_layers.blocks.''' + match[2]) else: __lowerCAmelCase = str(int(match[2]) - num_meta4D_last_stage) __lowerCAmelCase = trimmed_name.replace('''network''', '''meta3D_layers.blocks.''' + layer_index) if "norm1" in old_name: __lowerCAmelCase = trimmed_name.replace('''norm1''', '''layernorm1''') elif "norm2" in old_name: __lowerCAmelCase = trimmed_name.replace('''norm2''', '''layernorm2''') elif "fc1" in old_name: __lowerCAmelCase = trimmed_name.replace('''fc1''', '''linear_in''') elif "fc2" in old_name: __lowerCAmelCase = trimmed_name.replace('''fc2''', '''linear_out''') __lowerCAmelCase = '''last_stage.''' + trimmed_name elif "network" in old_name and re.search(r'''.\d.''', lowerCamelCase): __lowerCAmelCase = old_name.replace('''network''', '''intermediate_stages''') if "fc" in new_name: __lowerCAmelCase = new_name.replace('''fc''', '''convolution''') elif ("norm1" in new_name) and ("layernorm1" not in new_name): __lowerCAmelCase = new_name.replace('''norm1''', '''batchnorm_before''') elif ("norm2" in new_name) and ("layernorm2" not in new_name): __lowerCAmelCase = new_name.replace('''norm2''', '''batchnorm_after''') if "proj" in new_name: __lowerCAmelCase = new_name.replace('''proj''', '''projection''') if "dist_head" in new_name: __lowerCAmelCase = new_name.replace('''dist_head''', '''distillation_classifier''') elif "head" in new_name: __lowerCAmelCase = new_name.replace('''head''', '''classifier''') elif "patch_embed" in new_name: __lowerCAmelCase = '''efficientformer.''' + new_name elif new_name == "norm.weight" or new_name == "norm.bias": __lowerCAmelCase = new_name.replace('''norm''', '''layernorm''') __lowerCAmelCase = '''efficientformer.''' + new_name else: __lowerCAmelCase = '''efficientformer.encoder.''' + new_name return new_name def __magic_name__( lowerCamelCase, lowerCamelCase): for key in checkpoint.copy().keys(): 
__lowerCAmelCase = checkpoint.pop(lowerCamelCase) __lowerCAmelCase = val return checkpoint def __magic_name__( ): __lowerCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __lowerCAmelCase = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase).raw) return image def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = torch.load(lowerCamelCase, map_location='''cpu''')['''model'''] __lowerCAmelCase = EfficientFormerConfig.from_json_file(lowerCamelCase) __lowerCAmelCase = EfficientFormerForImageClassificationWithTeacher(lowerCamelCase) __lowerCAmelCase = '''_'''.join(checkpoint_path.split('''/''')[-1].split('''.''')[0].split('''_''')[:-1]) __lowerCAmelCase = config.depths[-1] - config.num_metaad_blocks + 1 __lowerCAmelCase = convert_torch_checkpoint(lowerCamelCase, lowerCamelCase) model.load_state_dict(lowerCamelCase) model.eval() __lowerCAmelCase = { '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } # prepare image __lowerCAmelCase = prepare_img() __lowerCAmelCase = 2_5_6 __lowerCAmelCase = 2_2_4 __lowerCAmelCase = EfficientFormerImageProcessor( size={'''shortest_edge''': image_size}, crop_size={'''height''': crop_size, '''width''': crop_size}, resample=pillow_resamplings['''bicubic'''], ) __lowerCAmelCase = processor(images=lowerCamelCase, return_tensors='''pt''').pixel_values # original processing pipeline __lowerCAmelCase = Compose( [ Resize(lowerCamelCase, interpolation=pillow_resamplings['''bicubic''']), CenterCrop(lowerCamelCase), ToTensor(), Normalize(lowerCamelCase, lowerCamelCase), ]) __lowerCAmelCase = image_transforms(lowerCamelCase).unsqueeze(0) assert torch.allclose(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = model(lowerCamelCase) __lowerCAmelCase = outputs.logits __lowerCAmelCase = (1, 1_0_0_0) if "l1" in model_name: __lowerCAmelCase = torch.Tensor( [-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28]) assert torch.allclose(logits[0, :1_0], lowerCamelCase, atol=1E-3) assert logits.shape == expected_shape elif "l3" in model_name: __lowerCAmelCase = torch.Tensor( [-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27]) assert torch.allclose(logits[0, :1_0], lowerCamelCase, atol=1E-3) assert logits.shape == expected_shape elif "l7" in model_name: __lowerCAmelCase = torch.Tensor( [-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78]) assert logits.shape == expected_shape else: raise ValueError( F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""") # Save Checkpoints Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase) model.save_pretrained(lowerCamelCase) print(F"""Checkpoint successfuly converted. 
Model saved at {pytorch_dump_path}""") processor.save_pretrained(lowerCamelCase) print(F"""Processor successfuly saved at {pytorch_dump_path}""") if push_to_hub: print('''Pushing model to the hub...''') model.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add model''', use_temp_dir=lowerCamelCase, ) processor.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add image processor''', use_temp_dir=lowerCamelCase, ) if __name__ == "__main__": _UpperCAmelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--pytorch_model_path""", default=None, type=str, required=True, help="""Path to EfficientFormer pytorch checkpoint.""", ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The json file for EfficientFormer model config.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) parser.set_defaults(push_to_hub=True) _UpperCAmelCase : List[str] = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
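# A generic sketch of the checkpoint-renaming pattern the script above uses:
# map every old key to a new one and reinsert the tensor under the new name.
# The key names below are illustrative, not EfficientFormer's real ones.
import torch

def rename_state_dict(state_dict, rename):
    return {rename(key): value for key, value in state_dict.items()}

sd = {"patch_embed.proj.weight": torch.zeros(8, 3, 4, 4)}
new_sd = rename_state_dict(sd, lambda k: k.replace("patch_embed.proj", "embeddings.projection"))
print(list(new_sd))  # ['embeddings.projection.weight']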
'''simple docstring''' import argparse import os import re import packaging.version _UpperCAmelCase : Optional[int] = """examples/""" _UpperCAmelCase : Dict = { """examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""), """doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } _UpperCAmelCase : str = { """init""": """src/transformers/__init__.py""", """setup""": """setup.py""", } _UpperCAmelCase : Optional[Any] = """README.md""" def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): with open(lowerCAmelCase__, '''r''', encoding='''utf-8''', newline='''\n''') as f: __lowerCAmelCase = f.read() __lowerCAmelCase = REPLACE_PATTERNS[pattern] __lowerCAmelCase = replace.replace('''VERSION''', lowerCAmelCase__) __lowerCAmelCase = re_pattern.sub(lowerCAmelCase__, lowerCAmelCase__) with open(lowerCAmelCase__, '''w''', encoding='''utf-8''', newline='''\n''') as f: f.write(lowerCAmelCase__) def __magic_name__( lowerCamelCase): for folder, directories, fnames in os.walk(lowerCAmelCase__): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''') if "legacy" in directories: directories.remove('''legacy''') for fname in fnames: if fname.endswith('''.py'''): update_version_in_file(os.path.join(lowerCAmelCase__, lowerCAmelCase__), lowerCAmelCase__, pattern='''examples''') def __magic_name__( lowerCamelCase, lowerCamelCase=False): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) if not patch: update_version_in_examples(lowerCAmelCase__) def __magic_name__( ): __lowerCAmelCase = '''🤗 Transformers currently provides the following architectures''' __lowerCAmelCase = '''1. Want to contribute a new model?''' with open(lowerCAmelCase__, '''r''', encoding='''utf-8''', newline='''\n''') as f: __lowerCAmelCase = f.readlines() # Find the start of the list. __lowerCAmelCase = 0 while not lines[start_index].startswith(_start_prompt): start_index += 1 start_index += 1 __lowerCAmelCase = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt): if lines[index].startswith('''1.'''): __lowerCAmelCase = lines[index].replace( '''https://huggingface.co/docs/transformers/main/model_doc''', '''https://huggingface.co/docs/transformers/model_doc''', ) index += 1 with open(lowerCAmelCase__, '''w''', encoding='''utf-8''', newline='''\n''') as f: f.writelines(lowerCAmelCase__) def __magic_name__( ): with open(REPLACE_FILES['''init'''], '''r''') as f: __lowerCAmelCase = f.read() __lowerCAmelCase = REPLACE_PATTERNS['''init'''][0].search(lowerCAmelCase__).groups()[0] return packaging.version.parse(lowerCAmelCase__) def __magic_name__( lowerCamelCase=False): __lowerCAmelCase = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''') if default_version.is_devrelease: __lowerCAmelCase = default_version.base_version elif patch: __lowerCAmelCase = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: __lowerCAmelCase = F"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. __lowerCAmelCase = input(F"""Which version are you releasing? [{default_version}]""") if len(lowerCAmelCase__) == 0: __lowerCAmelCase = default_version print(F"""Updating version to {version}.""") global_version_update(lowerCAmelCase__, patch=lowerCAmelCase__) if not patch: print('''Cleaning main README, don\'t forget to run `make fix-copies`.''') clean_main_ref_in_model_list() def __magic_name__( ): __lowerCAmelCase = get_version() __lowerCAmelCase = F"""{current_version.major}.{current_version.minor + 1}.0.dev0""" __lowerCAmelCase = current_version.base_version # Check with the user we got that right. __lowerCAmelCase = input(F"""Which version are we developing now? [{dev_version}]""") if len(lowerCAmelCase__) == 0: __lowerCAmelCase = dev_version print(F"""Updating version to {version}.""") global_version_update(lowerCAmelCase__) print('''Cleaning main README, don\'t forget to run `make fix-copies`.''') clean_main_ref_in_model_list() if __name__ == "__main__": _UpperCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") _UpperCAmelCase : str = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
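# A stand-alone sketch of the core replace step the utility above performs
# per file: a compiled MULTILINE pattern swaps the version string in place.
import re

pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
text = '__version__ = "4.26.0.dev0"\n'
print(pattern.sub('__version__ = "4.26.0"', text))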
'''simple docstring'''
from __future__ import annotations

import math


def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split a matrix with even dimensions into four quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursive Strassen multiplication; expects square power-of-two matrices."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    # copy the inputs so the zero padding below does not mutate the caller's lists
    new_matrix1 = [row[:] for row in matrix1]
    new_matrix2 = [row[:] for row in matrix2]

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
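A quick cross-check against schoolbook multiplication catches regressions in the quadrant algebra above. This is a minimal sketch; the naive_multiply helper is hypothetical and not part of the module:

# Sanity-check sketch: compare strassen() with naive O(n^3) multiplication.
# naive_multiply is an illustrative helper added here, not upstream code.
import random


def naive_multiply(a: list, b: list) -> list:
    # classic triple loop, written as comprehensions
    return [
        [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
        for i in range(len(a))
    ]


random.seed(0)
left = [[random.randint(0, 9) for _ in range(3)] for _ in range(5)]
right = [[random.randint(0, 9) for _ in range(4)] for _ in range(3)]
assert strassen(left, right) == naive_multiply(left, right)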
9
0
import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase : Tuple = logging.get_logger(__name__) def __magic_name__( lowerCamelCase): __lowerCAmelCase = SwinConfig.from_pretrained( '''microsoft/swin-tiny-patch4-window7-224''', out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4''']) __lowerCAmelCase = MaskFormerConfig(backbone_config=lowerCamelCase) __lowerCAmelCase = '''huggingface/label-files''' if "ade20k-full" in model_name: # this should be ok __lowerCAmelCase = 8_4_7 __lowerCAmelCase = '''maskformer-ade20k-full-id2label.json''' elif "ade" in model_name: # this should be ok __lowerCAmelCase = 1_5_0 __lowerCAmelCase = '''ade20k-id2label.json''' elif "coco-stuff" in model_name: # this should be ok __lowerCAmelCase = 1_7_1 __lowerCAmelCase = '''maskformer-coco-stuff-id2label.json''' elif "coco" in model_name: # TODO __lowerCAmelCase = 1_3_3 __lowerCAmelCase = '''coco-panoptic-id2label.json''' elif "cityscapes" in model_name: # this should be ok __lowerCAmelCase = 1_9 __lowerCAmelCase = '''cityscapes-id2label.json''' elif "vistas" in model_name: # this should be ok __lowerCAmelCase = 6_5 __lowerCAmelCase = '''mapillary-vistas-id2label.json''' __lowerCAmelCase = json.load(open(hf_hub_download(lowerCamelCase, lowerCamelCase, repo_type='''dataset'''), '''r''')) __lowerCAmelCase = {int(lowerCamelCase): v for k, v in idalabel.items()} return config def __magic_name__( lowerCamelCase): __lowerCAmelCase = [] # stem # fmt: off rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''')) rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''')) rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''')) rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''')) # stages for i in range(len(config.backbone_config.depths)): for j in range(config.backbone_config.depths[i]): rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""")) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""")) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""")) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""")) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""")) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", 
F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""")) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""")) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""")) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""")) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""")) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""")) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""")) if i < 3: rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""")) rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""")) rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""")) rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""")) rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""")) # FPN rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''')) rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''')) rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''')) for source_index, target_index in zip(range(3, 0, -1), range(0, 3)): rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""")) rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""")) rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""")) rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""")) rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""")) rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""")) rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''')) rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''')) # Transformer decoder for idx in 
range(config.decoder_config.decoder_layers): # self-attention out projection rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""")) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""")) # cross-attention out projection rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""")) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""")) # MLP 1 rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""")) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""")) # MLP 2 rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""")) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""")) # layernorm 1 (self-attention layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""")) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""")) # layernorm 2 (cross-attention layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""")) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""")) # layernorm 3 (final layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""")) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""")) rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''')) rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''')) # heads on top rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''')) rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''')) rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''')) rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''')) 
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''')) for i in range(3): rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""")) rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""")) # fmt: on return rename_keys def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = dct.pop(lowerCamelCase) __lowerCAmelCase = val def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))] for i in range(len(backbone_config.depths)): __lowerCAmelCase = num_features[i] for j in range(backbone_config.depths[i]): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) __lowerCAmelCase = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""") __lowerCAmelCase = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""") # next, add query, keys and values (in that order) to the state dict __lowerCAmelCase = in_proj_weight[:dim, :] __lowerCAmelCase = in_proj_bias[: dim] __lowerCAmelCase = in_proj_weight[ dim : dim * 2, : ] __lowerCAmelCase = in_proj_bias[ dim : dim * 2 ] __lowerCAmelCase = in_proj_weight[ -dim :, : ] __lowerCAmelCase = in_proj_bias[-dim :] # fmt: on def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) __lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""") __lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""") # next, add query, keys and values (in that order) to the state dict __lowerCAmelCase = in_proj_weight[: hidden_size, :] __lowerCAmelCase = in_proj_bias[:config.hidden_size] __lowerCAmelCase = in_proj_weight[hidden_size : hidden_size * 2, :] __lowerCAmelCase = in_proj_bias[hidden_size : hidden_size * 2] __lowerCAmelCase = in_proj_weight[-hidden_size :, :] __lowerCAmelCase = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) __lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""") __lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""") # next, add query, keys and values (in that order) to the state dict __lowerCAmelCase = in_proj_weight[: hidden_size, :] __lowerCAmelCase = in_proj_bias[:config.hidden_size] __lowerCAmelCase = in_proj_weight[hidden_size : hidden_size * 2, :] __lowerCAmelCase = in_proj_bias[hidden_size : hidden_size * 2] __lowerCAmelCase = in_proj_weight[-hidden_size :, :] __lowerCAmelCase = in_proj_bias[-hidden_size :] # fmt: on def __magic_name__( ): __lowerCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __lowerCAmelCase = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase).raw) return im @torch.no_grad() def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = False): __lowerCAmelCase = get_maskformer_config(lowerCamelCase) # load 
original state_dict with open(lowerCamelCase, '''rb''') as f: __lowerCAmelCase = pickle.load(lowerCamelCase) __lowerCAmelCase = data['''model'''] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys __lowerCAmelCase = create_rename_keys(lowerCamelCase) for src, dest in rename_keys: rename_key(lowerCamelCase, lowerCamelCase, lowerCamelCase) read_in_swin_q_k_v(lowerCamelCase, config.backbone_config) read_in_decoder_q_k_v(lowerCamelCase, lowerCamelCase) # update to torch tensors for key, value in state_dict.items(): __lowerCAmelCase = torch.from_numpy(lowerCamelCase) # load 🤗 model __lowerCAmelCase = MaskFormerForInstanceSegmentation(lowerCamelCase) model.eval() for name, param in model.named_parameters(): print(lowerCamelCase, param.shape) __lowerCAmelCase , __lowerCAmelCase = model.load_state_dict(lowerCamelCase, strict=lowerCamelCase) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(lowerCamelCase) == 0, F"""Unexpected keys: {unexpected_keys}""" # verify results __lowerCAmelCase = prepare_img() if "vistas" in model_name: __lowerCAmelCase = 6_5 elif "cityscapes" in model_name: __lowerCAmelCase = 6_5_5_3_5 else: __lowerCAmelCase = 2_5_5 __lowerCAmelCase = True if '''ade''' in model_name else False __lowerCAmelCase = MaskFormerImageProcessor(ignore_index=lowerCamelCase, reduce_labels=lowerCamelCase) __lowerCAmelCase = image_processor(lowerCamelCase, return_tensors='''pt''') __lowerCAmelCase = model(**lowerCamelCase) print('''Logits:''', outputs.class_queries_logits[0, :3, :3]) if model_name == "maskformer-swin-tiny-ade": __lowerCAmelCase = torch.tensor( [[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]]) assert torch.allclose(outputs.class_queries_logits[0, :3, :3], lowerCamelCase, atol=1E-4) print('''Looks ok!''') if pytorch_dump_folder_path is not None: print(F"""Saving model and image processor to {pytorch_dump_folder_path}""") Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase) model.save_pretrained(lowerCamelCase) image_processor.save_pretrained(lowerCamelCase) if push_to_hub: print('''Pushing model and image processor to the hub...''') model.push_to_hub(F"""nielsr/{model_name}""") image_processor.push_to_hub(F"""nielsr/{model_name}""") if __name__ == "__main__": _UpperCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""maskformer-swin-tiny-ade""", type=str, help=("""Name of the MaskFormer model you'd like to convert""",), ) parser.add_argument( """--checkpoint_path""", default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""", type=str, help="""Path to the original state dict (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) _UpperCAmelCase : List[Any] = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
352
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): __lowerCAmelCase = tempfile.mkdtemp() # fmt: off __lowerCAmelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) __lowerCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowerCAmelCase = {'''unk_token''': '''<unk>'''} __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowercase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowercase ) ) __lowerCAmelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } __lowerCAmelCase = os.path.join(self.tmpdirname , __lowercase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowercase , __lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , **__lowercase ): return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self ): shutil.rmtree(self.tmpdirname ) def _snake_case (self ): __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCAmelCase = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase ) __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowercase ) 
self.assertIsInstance(processor_fast.tokenizer , __lowercase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowercase ) self.assertIsInstance(processor_fast.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCAmelCase = self.get_image_processor(do_normalize=__lowercase , padding_value=1.0 ) __lowerCAmelCase = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowercase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = image_processor(__lowercase , return_tensors='''np''' ) __lowerCAmelCase = processor(images=__lowercase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = processor(text=__lowercase ) __lowerCAmelCase = tokenizer(__lowercase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(text=__lowercase , images=__lowercase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCAmelCase = processor.batch_decode(__lowercase ) __lowerCAmelCase = tokenizer.batch_decode(__lowercase ) self.assertListEqual(__lowercase , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(text=__lowercase , images=__lowercase ) self.assertListEqual(list(inputs.keys() ) 
, processor.model_input_names )
9
0
'''simple docstring'''
import base64


def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    return base64.b85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
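A round-trip property test is the quickest way to validate an encode/decode pair like this one; the loop below is an illustrative sketch, not part of the original script:

# Round-trip sketch: decode(encode(s)) should reproduce s for any text,
# including non-ASCII, since the payload is UTF-8 encoded before Base85.
for sample in ["", "Hello World!", "café ☕"]:
    assert base85_decode(base85_encode(sample)) == sample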
353
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class a__ ( __A ): """simple docstring""" def __init__(self , __lowercase , __lowercase=None , __lowercase=None , __lowercase=0 ): __lowerCAmelCase = 1.0 if scale is None else scale __lowerCAmelCase = 0.0 if loc is None else loc super().__init__(__lowercase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__lowercase )] ) @property def _snake_case (self ): return self.base_dist.mean * self.scale + self.loc @property def _snake_case (self ): return self.base_dist.variance * self.scale**2 @property def _snake_case (self ): return self.variance.sqrt() class a__ ( nn.Module ): """simple docstring""" def __init__(self , __lowercase , __lowercase , __lowercase , **__lowercase ): super().__init__(**__lowercase ) __lowerCAmelCase = args_dim __lowerCAmelCase = nn.ModuleList([nn.Linear(__lowercase , __lowercase ) for dim in args_dim.values()] ) __lowerCAmelCase = domain_map def _snake_case (self , __lowercase ): __lowerCAmelCase = [proj(__lowercase ) for proj in self.proj] return self.domain_map(*__lowercase ) class a__ ( nn.Module ): """simple docstring""" def __init__(self , __lowercase ): super().__init__() __lowerCAmelCase = function def _snake_case (self , __lowercase , *__lowercase ): return self.function(__lowercase , *__lowercase ) class a__ : """simple docstring""" __UpperCamelCase : type __UpperCamelCase : int __UpperCamelCase : Dict[str, int] def __init__(self , __lowercase = 1 ): __lowerCAmelCase = dim __lowerCAmelCase = {k: dim * self.args_dim[k] for k in self.args_dim} def _snake_case (self , __lowercase ): if self.dim == 1: return self.distribution_class(*__lowercase ) else: return Independent(self.distribution_class(*__lowercase ) , 1 ) def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None , ): __lowerCAmelCase = self._base_distribution(__lowercase ) if loc is None and scale is None: return distr else: return AffineTransformed(__lowercase , loc=__lowercase , scale=__lowercase , event_dim=self.event_dim ) @property def _snake_case (self ): return () if self.dim == 1 else (self.dim,) @property def _snake_case (self ): return len(self.event_shape ) @property def _snake_case (self ): return 0.0 def _snake_case (self , __lowercase ): return ParameterProjection( in_features=__lowercase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def _snake_case (self , *__lowercase ): raise NotImplementedError() @staticmethod def _snake_case (__lowercase ): return (x + torch.sqrt(torch.square(__lowercase ) + 4.0 )) / 2.0 class a__ ( __A ): """simple docstring""" __UpperCamelCase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} __UpperCamelCase : type = StudentT @classmethod def _snake_case (cls , __lowercase , __lowercase , __lowercase ): __lowerCAmelCase = cls.squareplus(__lowercase ).clamp_min(torch.finfo(scale.dtype ).eps ) __lowerCAmelCase = 2.0 + cls.squareplus(__lowercase ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class a__ ( __A ): """simple docstring""" __UpperCamelCase : Dict[str, int] = {"loc": 1, "scale": 1} __UpperCamelCase : type = Normal @classmethod def _snake_case (cls , __lowercase , __lowercase ): __lowerCAmelCase = cls.squareplus(__lowercase ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class a__ ( __A ): """simple 
docstring""" __UpperCamelCase : Dict[str, int] = {"total_count": 1, "logits": 1} __UpperCamelCase : type = NegativeBinomial @classmethod def _snake_case (cls , __lowercase , __lowercase ): __lowerCAmelCase = cls.squareplus(__lowercase ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def _snake_case (self , __lowercase ): __lowerCAmelCase , __lowerCAmelCase = distr_args if self.dim == 1: return self.distribution_class(total_count=__lowercase , logits=__lowercase ) else: return Independent(self.distribution_class(total_count=__lowercase , logits=__lowercase ) , 1 ) def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None ): __lowerCAmelCase , __lowerCAmelCase = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
9
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
354
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
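A hedged usage sketch for the tool above; it assumes the transformers agents load_tool entry point and a local image file, neither of which is shown in this module:

# Usage sketch: run the tool end to end (downloads the Donut checkpoint).
from PIL import Image
from transformers import load_tool

document_qa = load_tool("document-question-answering")
document = Image.open("invoice.png")  # hypothetical local scan
print(document_qa(document, "What is the total amount?"))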
9
0
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
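For context, the standard way to exercise a config class like this is to build a small randomly initialized model from it; a sketch (the reduced sizes are arbitrary):

# Usage sketch: a tiny randomly initialized BioGPT for smoke tests.
from transformers import BioGptConfig, BioGptForCausalLM

config = BioGptConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=4, intermediate_size=256)
model = BioGptForCausalLM(config)
print(sum(p.numel() for p in model.parameters()))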
355
'''simple docstring'''
def count_divisors(n: int) -> int:
    """Count the divisors of n via its prime factorization."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
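The divisor count relies on the identity d(p1^a1 * ... * pk^ak) = (a1 + 1) * ... * (ak + 1); a short worked check (28 = 2^2 * 7, and 76576500 is the well-known Project Euler #12 answer):

# Worked check of the divisor-count identity and the final answer.
assert count_divisors(28) == 6          # divisors: 1, 2, 4, 7, 14, 28
assert count_divisors(76576500) > 500   # first triangle number over 500 divisors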
9
0
'''simple docstring''' from __future__ import annotations class a__ : """simple docstring""" def __init__(self , __lowercase ): __lowerCAmelCase = TypeError( '''Matrices must be formed from a list of zero or more lists containing at ''' '''least one and the same number of values, each of which must be of type ''' '''int or float.''' ) if len(_A ) != 0: __lowerCAmelCase = len(rows[0] ) if cols == 0: raise error for row in rows: if len(_A ) != cols: raise error for value in row: if not isinstance(_A , (int, float) ): raise error __lowerCAmelCase = rows else: __lowerCAmelCase = [] def _snake_case (self ): return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )] @property def _snake_case (self ): return len(self.rows ) @property def _snake_case (self ): return len(self.rows[0] ) @property def _snake_case (self ): return (self.num_rows, self.num_columns) @property def _snake_case (self ): return self.order[0] == self.order[1] def _snake_case (self ): __lowerCAmelCase = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows )] for row_num in range(self.num_rows ) ] return Matrix(_A ) def _snake_case (self ): if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0] ) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0]) ) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns ) ) def _snake_case (self ): return bool(self.determinant() ) def _snake_case (self , __lowercase , __lowercase ): __lowerCAmelCase = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns ) if other_column != column ] for other_row in range(self.num_rows ) if other_row != row ] return Matrix(_A ).determinant() def _snake_case (self , __lowercase , __lowercase ): if (row + column) % 2 == 0: return self.get_minor(_A , _A ) return -1 * self.get_minor(_A , _A ) def _snake_case (self ): return Matrix( [ [self.get_minor(_A , _A ) for column in range(self.num_columns )] for row in range(self.num_rows ) ] ) def _snake_case (self ): return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns ) ] for row in range(self.minors().num_rows ) ] ) def _snake_case (self ): __lowerCAmelCase = [ [self.cofactors().rows[column][row] for column in range(self.num_columns )] for row in range(self.num_rows ) ] return Matrix(_A ) def _snake_case (self ): __lowerCAmelCase = self.determinant() if not determinant: raise TypeError('''Only matrices with a non-zero determinant have an inverse''' ) return self.adjugate() * (1 / determinant) def __repr__(self ): return str(self.rows ) def __str__(self ): if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0] ) ) + "]]" return ( "[" + "\n ".join( [ '''[''' + '''. 
'''.join([str(_A ) for value in row] ) + '''.]''' for row in self.rows ] ) + "]" ) def _snake_case (self , __lowercase , __lowercase = None ): __lowerCAmelCase = TypeError('''Row must be a list containing all ints and/or floats''' ) if not isinstance(_A , _A ): raise type_error for value in row: if not isinstance(_A , (int, float) ): raise type_error if len(_A ) != self.num_columns: raise ValueError( '''Row must be equal in length to the other rows in the matrix''' ) if position is None: self.rows.append(_A ) else: __lowerCAmelCase = self.rows[0:position] + [row] + self.rows[position:] def _snake_case (self , __lowercase , __lowercase = None ): __lowerCAmelCase = TypeError( '''Column must be a list containing all ints and/or floats''' ) if not isinstance(_A , _A ): raise type_error for value in column: if not isinstance(_A , (int, float) ): raise type_error if len(_A ) != self.num_rows: raise ValueError( '''Column must be equal in length to the other columns in the matrix''' ) if position is None: __lowerCAmelCase = [self.rows[i] + [column[i]] for i in range(self.num_rows )] else: __lowerCAmelCase = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows ) ] def __eq__(self , __lowercase ): if not isinstance(_A , _A ): return NotImplemented return self.rows == other.rows def __ne__(self , __lowercase ): return not self == other def __neg__(self ): return self * -1 def __add__(self , __lowercase ): if self.order != other.order: raise ValueError('''Addition requires matrices of the same order''' ) return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __sub__(self , __lowercase ): if self.order != other.order: raise ValueError('''Subtraction requires matrices of the same order''' ) return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __mul__(self , __lowercase ): if isinstance(_A , (int, float) ): return Matrix( [[int(element * other ) for element in row] for row in self.rows] ) elif isinstance(_A , _A ): if self.num_columns != other.num_rows: raise ValueError( '''The number of columns in the first matrix must ''' '''be equal to the number of rows in the second''' ) return Matrix( [ [Matrix.dot_product(_A , _A ) for column in other.columns()] for row in self.rows ] ) else: raise TypeError( '''A Matrix can only be multiplied by an int, float, or another matrix''' ) def __pow__(self , __lowercase ): if not isinstance(_A , _A ): raise TypeError('''A Matrix can only be raised to the power of an int''' ) if not self.is_square: raise ValueError('''Only square matrices can be raised to a power''' ) if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( '''Only invertable matrices can be raised to a negative power''' ) __lowerCAmelCase = self for _ in range(other - 1 ): result *= self return result @classmethod def _snake_case (cls , __lowercase , __lowercase ): return sum(row[i] * column[i] for i in range(len(_A ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
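A short usage sketch of the class above, assuming it is restored to its upstream name Matrix (the obfuscated listing calls it a__):

# Usage sketch for the matrix class (upstream name assumed to be Matrix).
m = Matrix([[1, 2], [3, 4]])
print(m.determinant())   # -2
print((m * m).rows)      # [[7, 10], [15, 22]]
print((m + m).rows)      # [[2, 4], [6, 8]]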
356
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. __lowerCAmelCase = [[1, 2, 4], [1, 2, 3, 4]] __lowerCAmelCase = DisjunctiveConstraint(__lowercase ) self.assertTrue(isinstance(dc.token_ids , __lowercase ) ) with self.assertRaises(__lowercase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__lowercase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def _snake_case (self ): # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). __lowerCAmelCase = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__lowercase ): DisjunctiveConstraint(__lowercase ) # fails here def _snake_case (self ): __lowerCAmelCase = [[1, 2, 3], [1, 2, 4]] __lowerCAmelCase = DisjunctiveConstraint(__lowercase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 ) __lowerCAmelCase = stepped is True and completed is False and reset is False self.assertTrue(__lowercase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 ) __lowerCAmelCase = stepped is True and completed is False and reset is False self.assertTrue(__lowercase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(3 ) __lowerCAmelCase = stepped is True and completed is True and reset is False self.assertTrue(__lowercase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def _snake_case (self ): __lowerCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __lowerCAmelCase = DisjunctiveConstraint(__lowercase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
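Outside of unit tests, the constraint plugs into constrained beam search; a hedged sketch (the model choice and prompt are arbitrary, and constrained decoding requires num_beams > 1):

# Usage sketch: force either " rained" or " raining" into the output.
from transformers import GPT2LMHeadModel, GPT2Tokenizer
from transformers.generation import DisjunctiveConstraint

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")

alternatives = [tokenizer(" rained").input_ids, tokenizer(" raining").input_ids]
constraint = DisjunctiveConstraint(alternatives)

prompt = tokenizer("Yesterday the weather", return_tensors="pt")
output = model.generate(**prompt, constraints=[constraint], num_beams=4, max_new_tokens=20)
print(tokenizer.decode(output[0], skip_special_tokens=True))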
9
0
'''simple docstring'''
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
357
'''simple docstring''' from typing import Dict, Optional import numpy as np import datasets _UpperCAmelCase : List[str] = """ IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation, the mean IoU of the image is calculated by taking the IoU of each class and averaging them. """ _UpperCAmelCase : str = """ Args: predictions (`List[ndarray]`): List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. references (`List[ndarray]`): List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. num_labels (`int`): Number of classes (categories). ignore_index (`int`): Index that will be ignored during evaluation. nan_to_num (`int`, *optional*): If specified, NaN values will be replaced by the number defined by the user. label_map (`dict`, *optional*): If specified, dictionary mapping old label indices to new label indices. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: `Dict[str, float | ndarray]` comprising various elements: - *mean_iou* (`float`): Mean Intersection-over-Union (IoU averaged over all categories). - *mean_accuracy* (`float`): Mean accuracy (averaged over all categories). - *overall_accuracy* (`float`): Overall accuracy on all images. - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`): Per category accuracy. - *per_category_iou* (`ndarray` of shape `(num_labels,)`): Per category IoU. Examples: >>> import numpy as np >>> mean_iou = datasets.load_metric(\"mean_iou\") >>> # suppose one has 3 different segmentation maps predicted >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]]) >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]]) >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]]) >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]]) >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]]) >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]]) >>> predicted = [predicted_1, predicted_2, predicted_3] >>> ground_truth = [actual_1, actual_2, actual_3] >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False) >>> print(results) # doctest: +NORMALIZE_WHITESPACE {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])} """ _UpperCAmelCase : Tuple = """\ @software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020, author = {{MMSegmentation Contributors}}, license = {Apache-2.0}, month = {7}, title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}}, url = {https://github.com/open-mmlab/mmsegmentation}, year = {2020} }""" def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False, ): if label_map is not None: for old_id, new_id in label_map.items(): __lowerCAmelCase = new_id # turn into Numpy arrays __lowerCAmelCase = np.array(lowerCamelCase) __lowerCAmelCase = np.array(lowerCamelCase) if reduce_labels: __lowerCAmelCase = 2_5_5 __lowerCAmelCase = label - 1 __lowerCAmelCase = 2_5_5 __lowerCAmelCase = label != ignore_index __lowerCAmelCase = np.not_equal(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = pred_label[mask] __lowerCAmelCase = np.array(lowerCamelCase)[mask] __lowerCAmelCase = pred_label[pred_label == label] __lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0] __lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0] __lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0] __lowerCAmelCase = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False, ): __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) for result, gt_seg_map in zip(lowerCamelCase, lowerCamelCase): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = intersect_and_union( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = False, ): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = total_intersect_and_union( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) # compute metrics __lowerCAmelCase = {} __lowerCAmelCase = total_area_intersect.sum() / total_area_label.sum() __lowerCAmelCase = total_area_intersect / total_area_union __lowerCAmelCase = total_area_intersect / total_area_label __lowerCAmelCase = np.nanmean(lowerCamelCase) __lowerCAmelCase = np.nanmean(lowerCamelCase) __lowerCAmelCase = all_acc __lowerCAmelCase = iou __lowerCAmelCase = acc if nan_to_num is not None: __lowerCAmelCase = {metric: np.nan_to_num(lowerCamelCase, nan=lowerCamelCase) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): """simple docstring""" def _snake_case (self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - 
width dim { '''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ), '''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ), } ) , reference_urls=[ '''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py''' ] , ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = False , ): __lowerCAmelCase = mean_iou( results=__lowercase , gt_seg_maps=__lowercase , num_labels=__lowercase , ignore_index=__lowercase , nan_to_num=__lowercase , label_map=__lowercase , reduce_labels=__lowercase , ) return iou_result
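To make the bookkeeping in intersect_and_union concrete, here is a worked 2x2 example (the function name assumes the upstream definition that the call sites above reference):

# Worked example: per-label IoU on a 2x2 map with 2 labels.
import numpy as np

pred = np.array([[0, 1], [1, 1]])
gt = np.array([[0, 1], [0, 1]])
# label 0: intersect 1, union 2 -> IoU 0.50
# label 1: intersect 2, union 3 -> IoU 0.67
intersect, union, _, _ = intersect_and_union(pred, gt, num_labels=2, ignore_index=255)
print(intersect / union)  # [0.5, 0.6667]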
9
0
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase : List[Any] = logging.get_logger(__name__) _UpperCAmelCase : List[Any] = { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json", } class a__ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __UpperCamelCase : List[Any] = '''xlnet''' __UpperCamelCase : int = ['''mems'''] __UpperCamelCase : Any = { '''n_token''': '''vocab_size''', # Backward compatibility '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__(self , __lowercase=3_20_00 , __lowercase=10_24 , __lowercase=24 , __lowercase=16 , __lowercase=40_96 , __lowercase="gelu" , __lowercase=True , __lowercase="bi" , __lowercase=0.0_2 , __lowercase=1e-12 , __lowercase=0.1 , __lowercase=5_12 , __lowercase=None , __lowercase=True , __lowercase=False , __lowercase=False , __lowercase=-1 , __lowercase=False , __lowercase="last" , __lowercase=True , __lowercase="tanh" , __lowercase=0.1 , __lowercase=5 , __lowercase=5 , __lowercase=5 , __lowercase=1 , __lowercase=2 , **__lowercase , ): __lowerCAmelCase = vocab_size __lowerCAmelCase = d_model __lowerCAmelCase = n_layer __lowerCAmelCase = n_head if d_model % n_head != 0: raise ValueError(F"""\'d_model % n_head\' ({d_model % n_head}) should be equal to 0""" ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F"""`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})""" ) __lowerCAmelCase = d_model // n_head __lowerCAmelCase = ff_activation __lowerCAmelCase = d_inner __lowerCAmelCase = untie_r __lowerCAmelCase = attn_type __lowerCAmelCase = initializer_range __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = dropout __lowerCAmelCase = mem_len __lowerCAmelCase = reuse_len __lowerCAmelCase = bi_data __lowerCAmelCase = clamp_len __lowerCAmelCase = same_length __lowerCAmelCase = summary_type __lowerCAmelCase = summary_use_proj __lowerCAmelCase = summary_activation __lowerCAmelCase = summary_last_dropout __lowerCAmelCase = start_n_top __lowerCAmelCase = end_n_top __lowerCAmelCase = bos_token_id __lowerCAmelCase = pad_token_id __lowerCAmelCase = eos_token_id if "use_cache" in kwargs: warnings.warn( '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`''' ''' instead.''' , A__ , ) __lowerCAmelCase = kwargs['''use_cache'''] __lowerCAmelCase = use_mems_eval __lowerCAmelCase = use_mems_train super().__init__(pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , **A__ ) @property def _snake_case (self ): logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) return -1 @max_position_embeddings.setter def _snake_case (self , __lowercase ): # Message copied from Transformer-XL documentation raise NotImplementedError( F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
'''simple docstring''' import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : str = DebertaTokenizer __UpperCamelCase : str = True __UpperCamelCase : Any = DebertaTokenizerFast def _snake_case (self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __lowerCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) __lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __lowerCAmelCase = {'''unk_token''': '''[UNK]'''} __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowercase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowercase ) ) def _snake_case (self , **__lowercase ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , __lowercase ): __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = '''lower newer''' return input_text, output_text def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __lowerCAmelCase = tokenizer.tokenize(__lowercase ) self.assertListEqual(__lowercase , __lowercase ) __lowerCAmelCase = tokens + [tokenizer.unk_token] __lowerCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = tokenizer('''Hello''' , '''World''' ) __lowerCAmelCase = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , __lowercase ) @slow def _snake_case (self ): __lowerCAmelCase = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) __lowerCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowercase ) __lowerCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowercase ) __lowerCAmelCase = tokenizer.encode( '''sequence builders''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase ) __lowerCAmelCase = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def _snake_case (self ): __lowerCAmelCase = [self.tokenizer_class] if self.test_rust_tokenizer: 
tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: __lowerCAmelCase = tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) __lowerCAmelCase = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] __lowerCAmelCase = tokenizer(__lowercase , padding=__lowercase ) __lowerCAmelCase = [tokenizer.decode(__lowercase , skip_special_tokens=__lowercase ) for seq in encoding['''input_ids''']] # fmt: off __lowerCAmelCase = { '''input_ids''': [ [1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on __lowerCAmelCase = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , __lowercase ) for expected, decoded in zip(__lowercase , __lowercase ): self.assertEqual(__lowercase , __lowercase )
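# Hedged usage sketch of the behaviour the tests above assert (the checkpoint name
# is taken from the tests themselves; requires network access on first call):
#
#     from transformers import DebertaTokenizer
#
#     tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
#     enc = tokenizer("Hello", "World")
#     # enc["token_type_ids"] marks the first segment with 0s and the second with
#     # 1s, matching the [0, ..., 0, 1, ..., 1] pattern asserted above.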
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Any]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Any]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Any]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Any] | list[Any]:
    if root is None:
        return []
    output: list[Sequence[Any]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
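# Worked example on the five-node tree returned by make_tree():
#
#         1
#       /   \
#      2     3
#     / \
#    4   5
#
# preorder(root)    -> [1, 2, 4, 5, 3]
# inorder(root)     -> [4, 2, 5, 1, 3]
# postorder(root)   -> [4, 5, 2, 3, 1]
# level_order(root) -> [1, 2, 3, 4, 5]
# zigzag(root)      -> [[1], [3, 2], [4, 5]]   (direction alternates per level)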
import argparse
import datetime


def zeller(date_input: str) -> str:
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument("date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)")
    args = parser.parse_args()
    zeller(args.date_input)
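# Example (the function cross-checks its own result against datetime.date.weekday(),
# so a wrong answer raises AssertionError rather than returning):
#
#     >>> zeller("01-31-2010")
#     'Your date 01-31-2010, is a Sunday!'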
'''simple docstring''' import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class a__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : Tuple = (EulerDiscreteScheduler,) __UpperCamelCase : str = 10 def _snake_case (self , **__lowercase ): __lowerCAmelCase = { '''num_train_timesteps''': 11_00, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', } config.update(**__A ) return config def _snake_case (self ): for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=__A ) def _snake_case (self ): for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ): self.check_over_configs(beta_start=__A , beta_end=__A ) def _snake_case (self ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__A ) def _snake_case (self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__A ) def _snake_case (self ): __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**__A ) scheduler.set_timesteps(self.num_inference_steps ) __lowerCAmelCase = torch.manual_seed(0 ) __lowerCAmelCase = self.dummy_model() __lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma __lowerCAmelCase = sample.to(__A ) for i, t in enumerate(scheduler.timesteps ): __lowerCAmelCase = scheduler.scale_model_input(__A , __A ) __lowerCAmelCase = model(__A , __A ) __lowerCAmelCase = scheduler.step(__A , __A , __A , generator=__A ) __lowerCAmelCase = output.prev_sample __lowerCAmelCase = torch.sum(torch.abs(__A ) ) __lowerCAmelCase = torch.mean(torch.abs(__A ) ) assert abs(result_sum.item() - 10.08_07 ) < 1e-2 assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3 def _snake_case (self ): __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' ) __lowerCAmelCase = scheduler_class(**__A ) scheduler.set_timesteps(self.num_inference_steps ) __lowerCAmelCase = torch.manual_seed(0 ) __lowerCAmelCase = self.dummy_model() __lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma __lowerCAmelCase = sample.to(__A ) for i, t in enumerate(scheduler.timesteps ): __lowerCAmelCase = scheduler.scale_model_input(__A , __A ) __lowerCAmelCase = model(__A , __A ) __lowerCAmelCase = scheduler.step(__A , __A , __A , generator=__A ) __lowerCAmelCase = output.prev_sample __lowerCAmelCase = torch.sum(torch.abs(__A ) ) __lowerCAmelCase = torch.mean(torch.abs(__A ) ) assert abs(result_sum.item() - 0.0_0_0_2 ) < 1e-2 assert abs(result_mean.item() - 2.2_676e-06 ) < 1e-3 def _snake_case (self ): __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**__A ) scheduler.set_timesteps(self.num_inference_steps , device=__A ) __lowerCAmelCase = torch.manual_seed(0 ) __lowerCAmelCase = self.dummy_model() __lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() __lowerCAmelCase = sample.to(__A ) for t in scheduler.timesteps: __lowerCAmelCase = scheduler.scale_model_input(__A , __A ) __lowerCAmelCase = model(__A , __A ) __lowerCAmelCase = scheduler.step(__A , __A , __A , generator=__A ) __lowerCAmelCase = output.prev_sample __lowerCAmelCase = torch.sum(torch.abs(__A ) ) __lowerCAmelCase = torch.mean(torch.abs(__A ) ) assert abs(result_sum.item() - 
10.08_07 ) < 1e-2 assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3 def _snake_case (self ): __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**__A , use_karras_sigmas=__A ) scheduler.set_timesteps(self.num_inference_steps , device=__A ) __lowerCAmelCase = torch.manual_seed(0 ) __lowerCAmelCase = self.dummy_model() __lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() __lowerCAmelCase = sample.to(__A ) for t in scheduler.timesteps: __lowerCAmelCase = scheduler.scale_model_input(__A , __A ) __lowerCAmelCase = model(__A , __A ) __lowerCAmelCase = scheduler.step(__A , __A , __A , generator=__A ) __lowerCAmelCase = output.prev_sample __lowerCAmelCase = torch.sum(torch.abs(__A ) ) __lowerCAmelCase = torch.mean(torch.abs(__A ) ) assert abs(result_sum.item() - 1_24.52_29_94_99_51_17_19 ) < 1e-2 assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3 ) < 1e-3
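# Hedged sketch of the sampling loop the tests above exercise (public diffusers API;
# `unet` and `latents` are stand-ins, not defined in this file):
#
#     from diffusers import EulerDiscreteScheduler
#
#     scheduler = EulerDiscreteScheduler(beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
#     scheduler.set_timesteps(10)
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(latents, t)  # Euler pre-scales inputs
#         noise_pred = unet(model_input, t).sample
#         latents = scheduler.step(noise_pred, t, latents).prev_sample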
'''simple docstring''' import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Optional[Any] = ConsistencyModelPipeline __UpperCamelCase : Optional[int] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS __UpperCamelCase : int = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt __UpperCamelCase : List[Any] = frozenset( [ 'num_inference_steps', 'generator', 'latents', 'output_type', 'return_dict', 'callback', 'callback_steps', ] ) @property def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' , subfolder='''test_unet''' , ) return unet @property def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , ) return unet def _snake_case (self , __lowercase=False ): if class_cond: __lowerCAmelCase = self.dummy_cond_unet else: __lowerCAmelCase = self.dummy_uncond_unet # Default to CM multistep sampler __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, } return components def _snake_case (self , __lowercase , __lowercase=0 ): if str(__lowercase ).startswith('''mps''' ): __lowerCAmelCase = torch.manual_seed(__lowercase ) else: __lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase ) __lowerCAmelCase = { '''batch_size''': 1, '''num_inference_steps''': None, '''timesteps''': [22, 0], '''generator''': generator, '''output_type''': '''np''', } return inputs def _snake_case (self ): __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase ) __lowerCAmelCase = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_dummy_inputs(__lowercase ) __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _snake_case (self ): __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components(class_cond=__lowercase ) __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase ) __lowerCAmelCase = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_dummy_inputs(__lowercase ) __lowerCAmelCase = 0 __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 
0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _snake_case (self ): __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase ) __lowerCAmelCase = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_dummy_inputs(__lowercase ) __lowerCAmelCase = 1 __lowerCAmelCase = None __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _snake_case (self ): __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components(class_cond=__lowercase ) __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase ) __lowerCAmelCase = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_dummy_inputs(__lowercase ) __lowerCAmelCase = 1 __lowerCAmelCase = None __lowerCAmelCase = 0 __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case (self , __lowercase=0 , __lowercase=False , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=(1, 3, 64, 64) ): __lowerCAmelCase = torch.manual_seed(__lowercase ) __lowerCAmelCase = { '''num_inference_steps''': None, '''timesteps''': [22, 0], '''class_labels''': 0, '''generator''': generator, '''output_type''': '''np''', } if get_fixed_latents: __lowerCAmelCase = self.get_fixed_latents(seed=__lowercase , device=__lowercase , dtype=__lowercase , shape=__lowercase ) __lowerCAmelCase = latents return inputs def _snake_case (self , __lowercase=0 , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=(1, 3, 64, 64) ): if type(__lowercase ) == str: __lowerCAmelCase = torch.device(__lowercase ) __lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase ) __lowerCAmelCase = randn_tensor(__lowercase , generator=__lowercase , device=__lowercase , dtype=__lowercase ) return latents def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase ) pipe.to(torch_device=__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_inputs() __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _snake_case (self ): 
__lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase ) pipe.to(torch_device=__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_inputs() __lowerCAmelCase = 1 __lowerCAmelCase = None __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 @require_torch_a def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase ) pipe.to(torch_device=__lowercase , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_inputs(get_fixed_latents=__lowercase , device=__lowercase ) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=__lowercase , enable_math=__lowercase , enable_mem_efficient=__lowercase ): __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @require_torch_a def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase ) pipe.to(torch_device=__lowercase , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_inputs(get_fixed_latents=__lowercase , device=__lowercase ) __lowerCAmelCase = 1 __lowerCAmelCase = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=__lowercase , enable_math=__lowercase , enable_mem_efficient=__lowercase ): __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
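# Hedged sketch assembling the pipeline the way the slow tests above do (checkpoint
# and subfolder names are copied from those tests; the download is large):
#
#     from diffusers import CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNet2DModel
#
#     unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
#     scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
#     pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
#     image = pipe(num_inference_steps=None, timesteps=[22, 0], class_labels=0).images[0]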
'''simple docstring''' import os from collections import deque import torch from torch.utils.data import Dataset class a__ ( __A ): """simple docstring""" def __init__(self , __lowercase="" , __lowercase="train" ): assert os.path.isdir(__a ) __lowerCAmelCase = [] __lowerCAmelCase = os.listdir(__a ) for story_filename in story_filenames_list: if "summary" in story_filename: continue __lowerCAmelCase = os.path.join(__a , __a ) if not os.path.isfile(__a ): continue self.documents.append(__a ) def __len__(self ): return len(self.documents ) def __getitem__(self , __lowercase ): __lowerCAmelCase = self.documents[idx] __lowerCAmelCase = document_path.split('''/''' )[-1] with open(__a , encoding='''utf-8''' ) as source: __lowerCAmelCase = source.read() __lowerCAmelCase , __lowerCAmelCase = process_story(__a ) return document_name, story_lines, summary_lines def __magic_name__( lowerCamelCase): __lowerCAmelCase = list(filter(lambda lowerCamelCase: len(__snake_case) != 0, [line.strip() for line in raw_story.split('''\n''')])) # for some unknown reason some lines miss a period, add it __lowerCAmelCase = [_add_missing_period(__snake_case) for line in nonempty_lines] # gather article lines __lowerCAmelCase = [] __lowerCAmelCase = deque(__snake_case) while True: try: __lowerCAmelCase = lines.popleft() if element.startswith('''@highlight'''): break story_lines.append(__snake_case) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None, raising an exception. return story_lines, [] # gather summary lines __lowerCAmelCase = list(filter(lambda lowerCamelCase: not t.startswith('''@highlight'''), __snake_case)) return story_lines, summary_lines def __magic_name__( lowerCamelCase): __lowerCAmelCase = ['''.''', '''!''', '''?''', '''...''', '''\'''', '''`''', '''"''', '''\u2019''', '''\u2019''', ''')'''] if line.startswith('''@highlight'''): return line if line[-1] in END_TOKENS: return line return line + "." def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): if len(__snake_case) > block_size: return sequence[:block_size] else: sequence.extend([pad_token_id] * (block_size - len(__snake_case))) return sequence def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase = torch.ones_like(__snake_case) __lowerCAmelCase = sequence == pad_token_id __lowerCAmelCase = 0 return mask def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = [tokenizer.encode(__snake_case) for line in story_lines] __lowerCAmelCase = [token for sentence in story_lines_token_ids for token in sentence] __lowerCAmelCase = [tokenizer.encode(__snake_case) for line in summary_lines] __lowerCAmelCase = [token for sentence in summary_lines_token_ids for token in sentence] return story_token_ids, summary_token_ids def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase = [] for sequence in batch: __lowerCAmelCase = -1 __lowerCAmelCase = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2) batch_embeddings.append(__snake_case) return torch.tensor(__snake_case)
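# Worked example for the story-processing helper above (the second mangled def
# corresponds to `process_story` in the original summarization utilities). Missing
# trailing periods are added, and "@highlight" separates article from summary:
#
#     raw = "First sentence\n\nSecond sentence.\n\n@highlight\n\nA summary"
#     story_lines, summary_lines = process_story(raw)
#     # story_lines   -> ["First sentence.", "Second sentence."]
#     # summary_lines -> ["A summary."]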
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
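# The entry point above classifies [4.4, 3.1, 1.3, 1.4]; with its short petal
# measurements the nearest neighbours are all Iris setosa, so it prints "setosa".
# A larger k can be passed explicitly, trading noise robustness for blurred class
# boundaries:
#
#     classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4], k=9)  # still "setosa"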
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
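# Example invocation (the script file name is a placeholder; the arguments match
# the parser above):
#
#     python convert_txt2img_to_image_variation.py \
#         --dump_path ./karlo-image-variations \
#         --txt2img_unclip kakaobrain/karlo-v1-alpha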
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): __lowerCAmelCase = tempfile.mkdtemp() # fmt: off __lowerCAmelCase = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) __lowerCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowerCAmelCase = {'''unk_token''': '''<unk>'''} __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowercase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowercase ) ) __lowerCAmelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } __lowerCAmelCase = os.path.join(self.tmpdirname , __lowercase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowercase , __lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **__lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **__lowercase ) def _snake_case (self , **__lowercase ): return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self ): shutil.rmtree(self.tmpdirname ) def _snake_case (self ): __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCAmelCase = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase ) __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) 
self.assertIsInstance(processor_slow.tokenizer , __lowercase ) self.assertIsInstance(processor_fast.tokenizer , __lowercase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowercase ) self.assertIsInstance(processor_fast.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCAmelCase = self.get_image_processor(do_normalize=__lowercase ) __lowerCAmelCase = OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowercase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = image_processor(__lowercase , return_tensors='''np''' ) __lowerCAmelCase = processor(images=__lowercase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = processor(text=__lowercase , return_tensors='''np''' ) __lowerCAmelCase = tokenizer(__lowercase , return_tensors='''np''' ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(text=__lowercase , images=__lowercase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = '''google/owlvit-base-patch32''' __lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase ) __lowerCAmelCase = ['''cat''', '''nasa badge'''] __lowerCAmelCase = processor(text=__lowercase ) __lowerCAmelCase = 16 self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = '''google/owlvit-base-patch32''' __lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase ) __lowerCAmelCase = [['''cat''', '''nasa badge'''], 
['''person''']] __lowerCAmelCase = processor(text=__lowercase ) __lowerCAmelCase = 16 __lowerCAmelCase = len(__lowercase ) __lowerCAmelCase = max([len(__lowercase ) for texts in input_texts] ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = '''google/owlvit-base-patch32''' __lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase ) __lowerCAmelCase = ['''cat''', '''nasa badge'''] __lowerCAmelCase = processor(text=__lowercase ) __lowerCAmelCase = 16 __lowerCAmelCase = inputs['''input_ids'''] __lowerCAmelCase = [ [4_94_06, 23_68, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_94_06, 68_41, 1_13_01, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) ) self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] ) self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(images=__lowercase , query_images=__lowercase ) self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCAmelCase = processor.batch_decode(__lowercase ) __lowerCAmelCase = tokenizer.batch_decode(__lowercase ) self.assertListEqual(__lowercase , __lowercase )
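# Hedged sketch of the nested-query behaviour tested above: with a batch of query
# lists, input_ids are padded and tiled to (batch_size * max_queries, seq_len):
#
#     from transformers import OwlViTProcessor
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["cat", "nasa badge"], ["person"]])
#     assert inputs["input_ids"].shape == (4, 16)  # 2 samples x 2 max queries, seq len 16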
'''simple docstring''' from argparse import ArgumentParser, Namespace from typing import Any, List, Optional from ..pipelines import Pipeline, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand try: from fastapi import Body, FastAPI, HTTPException from fastapi.routing import APIRoute from pydantic import BaseModel from starlette.responses import JSONResponse from uvicorn import run _UpperCAmelCase : Optional[int] = True except (ImportError, AttributeError): _UpperCAmelCase : List[str] = object def __magic_name__( *lowerCamelCase, **lowerCamelCase): pass _UpperCAmelCase : Dict = False _UpperCAmelCase : Optional[int] = logging.get_logger("""transformers-cli/serving""") def __magic_name__( lowerCamelCase): __lowerCAmelCase = pipeline( task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, ) return ServeCommand(SCREAMING_SNAKE_CASE_, args.host, args.port, args.workers) class a__ ( snake_case__ ): """simple docstring""" __UpperCamelCase : dict class a__ ( snake_case__ ): """simple docstring""" __UpperCamelCase : List[str] __UpperCamelCase : Optional[List[int]] class a__ ( snake_case__ ): """simple docstring""" __UpperCamelCase : str class a__ ( snake_case__ ): """simple docstring""" __UpperCamelCase : Any class a__ ( snake_case__ ): """simple docstring""" @staticmethod def _snake_case (__lowercase ): __lowerCAmelCase = parser.add_parser( '''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' ) serve_parser.add_argument( '''--task''' , type=_A , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , ) serve_parser.add_argument('''--host''' , type=_A , default='''localhost''' , help='''Interface the server will listen on.''' ) serve_parser.add_argument('''--port''' , type=_A , default=88_88 , help='''Port the serving will listen to.''' ) serve_parser.add_argument('''--workers''' , type=_A , default=1 , help='''Number of http workers''' ) serve_parser.add_argument('''--model''' , type=_A , help='''Model\'s name or path to stored model.''' ) serve_parser.add_argument('''--config''' , type=_A , help='''Model\'s config name or path to stored model.''' ) serve_parser.add_argument('''--tokenizer''' , type=_A , help='''Tokenizer name to use.''' ) serve_parser.add_argument( '''--device''' , type=_A , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , ) serve_parser.set_defaults(func=_A ) def __init__(self , __lowercase , __lowercase , __lowercase , __lowercase ): __lowerCAmelCase = pipeline __lowerCAmelCase = host __lowerCAmelCase = port __lowerCAmelCase = workers if not _serve_dependencies_installed: raise RuntimeError( '''Using serve command requires FastAPI and uvicorn. 
''' '''Please install transformers with [serving]: pip install \"transformers[serving]\".''' '''Or install FastAPI and uvicorn separately.''' ) else: logger.info(F"""Serving model over {host}:{port}""" ) __lowerCAmelCase = FastAPI( routes=[ APIRoute( '''/''' , self.model_info , response_model=_A , response_class=_A , methods=['''GET'''] , ), APIRoute( '''/tokenize''' , self.tokenize , response_model=_A , response_class=_A , methods=['''POST'''] , ), APIRoute( '''/detokenize''' , self.detokenize , response_model=_A , response_class=_A , methods=['''POST'''] , ), APIRoute( '''/forward''' , self.forward , response_model=_A , response_class=_A , methods=['''POST'''] , ), ] , timeout=6_00 , ) def _snake_case (self ): run(self._app , host=self.host , port=self.port , workers=self.workers ) def _snake_case (self ): return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) ) def _snake_case (self , __lowercase = Body(_A , embed=_A ) , __lowercase = Body(_A , embed=_A ) ): try: __lowerCAmelCase = self._pipeline.tokenizer.tokenize(_A ) if return_ids: __lowerCAmelCase = self._pipeline.tokenizer.convert_tokens_to_ids(_A ) return ServeTokenizeResult(tokens=_A , tokens_ids=_A ) else: return ServeTokenizeResult(tokens=_A ) except Exception as e: raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(_A )} ) def _snake_case (self , __lowercase = Body(_A , embed=_A ) , __lowercase = Body(_A , embed=_A ) , __lowercase = Body(_A , embed=_A ) , ): try: __lowerCAmelCase = self._pipeline.tokenizer.decode(_A , _A , _A ) return ServeDeTokenizeResult(model='''''' , text=_A ) except Exception as e: raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(_A )} ) async def _snake_case (self , __lowercase=Body(_A , embed=_A ) ): if len(_A ) == 0: return ServeForwardResult(output=[] , attention=[] ) try: # Forward through the model __lowerCAmelCase = self._pipeline(_A ) return ServeForwardResult(output=_A ) except Exception as e: raise HTTPException(5_00 , {'''error''': str(_A )} )
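# Example invocation from a shell (the task name must come from
# get_supported_tasks(); "text-classification" is assumed to be among them):
#
#     transformers-cli serve --task text-classification --host localhost --port 8888 --workers 1
#
# The server then exposes GET / (model info) plus POST /tokenize, /detokenize and
# /forward, as declared in the APIRoute list above.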
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    # Naive approach: try every ordered triple.
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    # Two-pointer approach on the sorted array.
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
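# Worked example for the two-pointer variant: after sorting, each anchor arr[i]
# narrows the [left, right] window until the target sum is hit.
#
#     >>> triplet_sum2([1, 2, 3, 4, 5], 9)
#     (1, 3, 5)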
'''simple docstring''' import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class a__ ( a__ , a__ ): """simple docstring""" __UpperCamelCase : Optional[Any] = 1 @register_to_config def __init__(self , __lowercase = 10_00 , __lowercase = None ): # set `betas`, `alphas`, `timesteps` self.set_timesteps(SCREAMING_SNAKE_CASE_ ) # standard deviation of the initial noise distribution __lowerCAmelCase = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. __lowerCAmelCase = 4 # running values __lowerCAmelCase = [] def _snake_case (self , __lowercase , __lowercase = None ): __lowerCAmelCase = num_inference_steps __lowerCAmelCase = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1] __lowerCAmelCase = torch.cat([steps, torch.tensor([0.0] )] ) if self.config.trained_betas is not None: __lowerCAmelCase = torch.tensor(self.config.trained_betas , dtype=torch.floataa ) else: __lowerCAmelCase = torch.sin(steps * math.pi / 2 ) ** 2 __lowerCAmelCase = (1.0 - self.betas**2) ** 0.5 __lowerCAmelCase = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1] __lowerCAmelCase = timesteps.to(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = [] def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase = True , ): if self.num_inference_steps is None: raise ValueError( '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' ) __lowerCAmelCase = (self.timesteps == timestep).nonzero().item() __lowerCAmelCase = timestep_index + 1 __lowerCAmelCase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(SCREAMING_SNAKE_CASE_ ) if len(self.ets ) == 1: __lowerCAmelCase = self.ets[-1] elif len(self.ets ) == 2: __lowerCAmelCase = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets ) == 3: __lowerCAmelCase = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 else: __lowerCAmelCase = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) __lowerCAmelCase = self._get_prev_sample(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ ) def _snake_case (self , __lowercase , *__lowercase , **__lowercase ): return sample def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase ): __lowerCAmelCase = self.alphas[timestep_index] __lowerCAmelCase = self.betas[timestep_index] __lowerCAmelCase = self.alphas[prev_timestep_index] __lowerCAmelCase = self.betas[prev_timestep_index] __lowerCAmelCase = (sample - sigma * ets) / max(SCREAMING_SNAKE_CASE_ , 1e-8 ) __lowerCAmelCase = next_alpha * pred + ets * next_sigma return prev_sample def __len__(self ): return self.config.num_train_timesteps
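# Hedged usage sketch: this class corresponds to diffusers' IPNDMScheduler
# (fourth-order improved pseudo linear multistep); `model` and `sample` are
# stand-ins, not defined in this file:
#
#     from diffusers import IPNDMScheduler
#
#     scheduler = IPNDMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     for t in scheduler.timesteps:
#         model_output = model(sample, t)
#         sample = scheduler.step(model_output, t, sample).prev_sample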
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
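# Quick check on a 2x2 symmetric matrix whose dominant eigenpair is known in closed
# form: [[2, 1], [1, 2]] has eigenvalues 3 and 1 with dominant eigenvector [1, 1]/sqrt(2).
#
#     value, vector = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))
#     assert abs(value - 3.0) <= 1e-6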
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
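# With the lazy structure above, importing the package is cheap; submodules are
# resolved on first attribute access (standard transformers pattern):
#
#     from transformers import CpmAntConfig       # resolves configuration_cpmant lazily
#     from transformers import CpmAntForCausalLM  # only importable when torch is installed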
'''simple docstring''' from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract _UpperCAmelCase : str = logging.get_logger(__name__) def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): return [ int(1_0_0_0 * (box[0] / width)), int(1_0_0_0 * (box[1] / height)), int(1_0_0_0 * (box[2] / width)), int(1_0_0_0 * (box[3] / height)), ] def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase = None): __lowerCAmelCase = tesseract_config if tesseract_config is not None else '''''' # apply OCR __lowerCAmelCase = to_pil_image(lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = pil_image.size __lowerCAmelCase = pytesseract.image_to_data(lowerCamelCase, lang=lowerCamelCase, output_type='''dict''', config=lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height'''] # filter empty words and corresponding coordinates __lowerCAmelCase = [idx for idx, word in enumerate(lowerCamelCase) if not word.strip()] __lowerCAmelCase = [word for idx, word in enumerate(lowerCamelCase) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format __lowerCAmelCase = [] for x, y, w, h in zip(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = [x, y, x + w, y + h] actual_boxes.append(lowerCamelCase) # finally, normalize the bounding boxes __lowerCAmelCase = [] for box in actual_boxes: normalized_boxes.append(normalize_box(lowerCamelCase, lowerCamelCase, lowerCamelCase)) assert len(lowerCamelCase) == len(lowerCamelCase), "Not as many words as there are bounding boxes" return words, normalized_boxes class a__ ( __A ): """simple docstring""" __UpperCamelCase : str = ['pixel_values'] def __init__(self , __lowercase = True , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = None , __lowercase = "" , **__lowercase , ): super().__init__(**__lowercase ) __lowerCAmelCase = size if size is not None else {'''height''': 2_24, '''width''': 2_24} __lowerCAmelCase = get_size_dict(__lowercase ) __lowerCAmelCase = do_resize __lowerCAmelCase = size __lowerCAmelCase = resample __lowerCAmelCase = apply_ocr __lowerCAmelCase = ocr_lang __lowerCAmelCase = tesseract_config def _snake_case (self , __lowercase , __lowercase , __lowercase = PILImageResampling.BILINEAR , __lowercase = None , **__lowercase , ): __lowerCAmelCase = get_size_dict(__lowercase ) if "height" not in size or "width" not in size: raise ValueError(F"""The size 
dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" ) __lowerCAmelCase = (size['''height'''], size['''width''']) return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase ) def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ): __lowerCAmelCase = do_resize if do_resize is not None else self.do_resize __lowerCAmelCase = size if size is not None else self.size __lowerCAmelCase = get_size_dict(__lowercase ) __lowerCAmelCase = resample if resample is not None else self.resample __lowerCAmelCase = apply_ocr if apply_ocr is not None else self.apply_ocr __lowerCAmelCase = ocr_lang if ocr_lang is not None else self.ocr_lang __lowerCAmelCase = tesseract_config if tesseract_config is not None else self.tesseract_config __lowerCAmelCase = make_list_of_images(__lowercase ) if not valid_images(__lowercase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) # All transformations expect numpy arrays. __lowerCAmelCase = [to_numpy_array(__lowercase ) for image in images] if apply_ocr: requires_backends(self , '''pytesseract''' ) __lowerCAmelCase = [] __lowerCAmelCase = [] for image in images: __lowerCAmelCase , __lowerCAmelCase = apply_tesseract(__lowercase , __lowercase , __lowercase ) words_batch.append(__lowercase ) boxes_batch.append(__lowercase ) if do_resize: __lowerCAmelCase = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images] # flip color channels from RGB to BGR (as Detectron2 requires this) __lowerCAmelCase = [flip_channel_order(__lowercase ) for image in images] __lowerCAmelCase = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images] __lowerCAmelCase = BatchFeature(data={'''pixel_values''': images} , tensor_type=__lowercase ) if apply_ocr: __lowerCAmelCase = words_batch __lowerCAmelCase = boxes_batch return data
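# Usage sketch for an OCR image processor like the one above. The class name
# LayoutLMv2ImageProcessor is an assumption inferred from the RGB->BGR flip for
# Detectron2; the sample file name is hypothetical, and the apply_ocr path needs
# pytesseract plus a local Tesseract install.
from PIL import Image
from transformers import LayoutLMv2ImageProcessor

image = Image.open("invoice.png").convert("RGB")  # hypothetical sample document
processor = LayoutLMv2ImageProcessor(apply_ocr=True)
encoding = processor(image, return_tensors="pt")

print(encoding.pixel_values.shape)  # (1, 3, 224, 224) after the default resize
print(encoding.words[0][:5])        # first OCR'd words of the first image
print(encoding.boxes[0][:5])        # boxes normalized to a 0-1000 coordinate grid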
9
0
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) _UpperCAmelCase : Optional[int] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} _UpperCAmelCase : List[Any] = { """vocab_file""": { """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""", """allenai/longformer-large-4096""": ( """https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json""" ), """allenai/longformer-large-4096-finetuned-triviaqa""": ( """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json""" ), """allenai/longformer-base-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json""" ), """allenai/longformer-large-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json""" ), }, """merges_file""": { """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""", """allenai/longformer-large-4096""": ( """https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt""" ), """allenai/longformer-large-4096-finetuned-triviaqa""": ( """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt""" ), """allenai/longformer-base-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt""" ), """allenai/longformer-large-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt""" ), }, } _UpperCAmelCase : List[Any] = { """allenai/longformer-base-4096""": 4_0_9_6, """allenai/longformer-large-4096""": 4_0_9_6, """allenai/longformer-large-4096-finetuned-triviaqa""": 4_0_9_6, """allenai/longformer-base-4096-extra.pos.embd.only""": 4_0_9_6, """allenai/longformer-large-4096-extra.pos.embd.only""": 4_0_9_6, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def __magic_name__( ): __lowerCAmelCase = ( list(range(ord('''!'''), ord('''~''') + 1)) + list(range(ord('''¡'''), ord('''¬''') + 1)) + list(range(ord('''®'''), ord('''ÿ''') + 1)) ) __lowerCAmelCase = bs[:] __lowerCAmelCase = 0 for b in range(2**8): if b not in bs: bs.append(lowerCamelCase) cs.append(2**8 + n) n += 1 __lowerCAmelCase = [chr(lowerCamelCase) for n in cs] return dict(zip(lowerCamelCase, lowerCamelCase)) def __magic_name__( lowerCamelCase): __lowerCAmelCase = set() __lowerCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char)) __lowerCAmelCase = char return pairs class a__ ( UpperCAmelCase_ ): """simple docstring""" __UpperCamelCase : List[str] = VOCAB_FILES_NAMES __UpperCamelCase : Dict = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Dict = ["""input_ids""", """attention_mask"""] def __init__(self , __lowercase , __lowercase , __lowercase="replace" , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<s>" , __lowercase="<unk>" , __lowercase="<pad>" , __lowercase="<mask>" , __lowercase=False , **__lowercase , ): __lowerCAmelCase = AddedToken(__lowercase , 
lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else bos_token __lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else eos_token __lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else sep_token __lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else cls_token __lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else unk_token __lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else pad_token # Mask token behave like a normal word, i.e. include the space before it __lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token super().__init__( errors=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , add_prefix_space=__lowercase , **__lowercase , ) with open(__lowercase , encoding='''utf-8''' ) as vocab_handle: __lowerCAmelCase = json.load(__lowercase ) __lowerCAmelCase = {v: k for k, v in self.encoder.items()} __lowerCAmelCase = errors # how to handle errors in decoding __lowerCAmelCase = bytes_to_unicode() __lowerCAmelCase = {v: k for k, v in self.byte_encoder.items()} with open(__lowercase , encoding='''utf-8''' ) as merges_handle: __lowerCAmelCase = merges_handle.read().split('''\n''' )[1:-1] __lowerCAmelCase = [tuple(merge.split() ) for merge in bpe_merges] __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) __lowerCAmelCase = {} __lowerCAmelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __lowerCAmelCase = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property def _snake_case (self ): return len(self.encoder ) def _snake_case (self ): return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case (self , __lowercase ): if token in self.cache: return self.cache[token] __lowerCAmelCase = tuple(__lowercase ) __lowerCAmelCase = get_pairs(__lowercase ) if not pairs: return token while True: __lowerCAmelCase = min(__lowercase , key=lambda __lowercase : self.bpe_ranks.get(__lowercase , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __lowerCAmelCase = bigram __lowerCAmelCase = [] __lowerCAmelCase = 0 while i < len(__lowercase ): try: __lowerCAmelCase = word.index(__lowercase , __lowercase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __lowerCAmelCase = j if word[i] == first and i < len(__lowercase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowerCAmelCase = tuple(__lowercase ) __lowerCAmelCase = new_word if len(__lowercase ) == 1: break else: __lowerCAmelCase = get_pairs(__lowercase ) __lowerCAmelCase = ''' '''.join(__lowercase ) __lowerCAmelCase = word return word def _snake_case (self , __lowercase ): __lowerCAmelCase = [] for token in re.findall(self.pat , __lowercase ): __lowerCAmelCase = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to 
unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowercase ).split(''' ''' ) ) return bpe_tokens def _snake_case (self , __lowercase ): return self.encoder.get(__lowercase , self.encoder.get(self.unk_token ) ) def _snake_case (self , __lowercase ): return self.decoder.get(__lowercase ) def _snake_case (self , __lowercase ): __lowerCAmelCase = ''''''.join(__lowercase ) __lowerCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def _snake_case (self , __lowercase , __lowercase = None ): if not os.path.isdir(__lowercase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCAmelCase = os.path.join( __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase = os.path.join( __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowercase , ensure_ascii=__lowercase ) + '''\n''' ) __lowerCAmelCase = 0 with open(__lowercase , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowercase : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''' ) __lowerCAmelCase = token_index writer.write(''' '''.join(__lowercase ) + '''\n''' ) index += 1 return vocab_file, merge_file def _snake_case (self , __lowercase , __lowercase = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowerCAmelCase = [self.cls_token_id] __lowerCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case (self , __lowercase , __lowercase = None , __lowercase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase ) if token_ids_a is None: return [1] + ([0] * len(__lowercase )) + [1] return [1] + ([0] * len(__lowercase )) + [1, 1] + ([0] * len(__lowercase )) + [1] def _snake_case (self , __lowercase , __lowercase = None ): __lowerCAmelCase = [self.sep_token_id] __lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case (self , __lowercase , __lowercase=False , **__lowercase ): __lowerCAmelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowercase ) > 0 and not text[0].isspace()): __lowerCAmelCase = ''' ''' + text return (text, kwargs)
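# Usage sketch for the byte-level BPE tokenizer above; assumes it is exposed as
# LongformerTokenizer and that one of the checkpoints in PRETRAINED_VOCAB_FILES_MAP
# is reachable.
from transformers import LongformerTokenizer

tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
print(tokenizer.tokenize("Hello world"))      # BPE pieces, e.g. ['Hello', 'Ġworld']
print(tokenizer("Hello world")["input_ids"])  # <s>/</s> added by build_inputs_with_special_tokens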
366
'''simple docstring'''
from ..utils import DummyObject, requires_backends


class a__(metaclass=DummyObject):
    """simple docstring"""

    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    # The two classmethods below follow the standard transformers dummy-object
    # pattern; the original method names were stripped, so from_config /
    # from_pretrained are assumed.
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
9
0
'''simple docstring''' import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class a__ : def _snake_case (self , __lowercase , __lowercase , __lowercase ): return None class a__ : def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase ): return None class a__ ( unittest.TestCase ): __UpperCamelCase : Any = [ # (model_name, model_kwargs) ("""bert-base-cased""", {}), ("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def _snake_case (self ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__a , '''tf''' , 12 , **__a ) @require_torch @slow def _snake_case (self ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__a , '''pt''' , 12 , **__a ) @require_torch @slow def _snake_case (self ): from transformers import BertModel __lowerCAmelCase = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words'''] with NamedTemporaryFile(mode='''w+t''' ) as vocab_file: vocab_file.write('''\n'''.join(__a ) ) vocab_file.flush() __lowerCAmelCase = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: __lowerCAmelCase = BertModel(BertConfig(vocab_size=len(__a ) ) ) model.save_pretrained(__a ) self._test_export(__a , '''pt''' , 12 , __a ) @require_tf @slow def _snake_case (self ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: __lowerCAmelCase = self._test_export(__a , '''tf''' , 12 , **__a ) __lowerCAmelCase = quantize(Path(__a ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__a ).stat().st_size: self.fail('''Quantized model is bigger than initial ONNX model''' ) @require_torch @slow def _snake_case (self ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: __lowerCAmelCase = self._test_export(__a , '''pt''' , 12 , **__a ) __lowerCAmelCase = quantize(__a ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__a ).stat().st_size: self.fail('''Quantized model is bigger than initial ONNX model''' ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase=None , **__lowercase ): try: # Compute path with TemporaryDirectory() as tempdir: __lowerCAmelCase = Path(__a ).joinpath('''model.onnx''' ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__a , __a , __a , __a , __a , **__a ) return path except Exception as e: self.fail(__a ) @require_torch @require_tokenizers @slow def _snake_case (self ): from transformers import BertModel __lowerCAmelCase = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) ) __lowerCAmelCase = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' ) self._test_infer_dynamic_axis(__a , __a , '''pt''' ) @require_tf @require_tokenizers @slow def _snake_case (self ): from transformers import TFBertModel __lowerCAmelCase = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) ) __lowerCAmelCase = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' ) 
self._test_infer_dynamic_axis(__a , __a , '''tf''' ) def _snake_case (self , __lowercase , __lowercase , __lowercase ): __lowerCAmelCase = FeatureExtractionPipeline(__a , __a ) __lowerCAmelCase = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1'''] __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = infer_shapes(__a , __a ) # Assert all variables are present self.assertEqual(len(__a ) , len(__a ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] , __a ) self.assertSequenceEqual(variable_names[3:] , __a ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} ) self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} ) def _snake_case (self ): __lowerCAmelCase = ['''input_ids''', '''attention_mask''', '''token_type_ids'''] __lowerCAmelCase = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]} __lowerCAmelCase , __lowerCAmelCase = ensure_valid_input(FuncContiguousArgs() , __a , __a ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__a ) , 3 ) # Should have exactly the same input names self.assertEqual(set(__a ) , set(__a ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__a , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) __lowerCAmelCase , __lowerCAmelCase = ensure_valid_input(FuncNonContiguousArgs() , __a , __a ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__a ) , 1 ) self.assertEqual(len(__a ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens['''input_ids'''] ) self.assertEqual(ordered_input_names[0] , '''input_ids''' ) def _snake_case (self ): __lowerCAmelCase = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' ) self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
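# Sketch of the export + quantize flow the tests above exercise, outside the test
# harness; the argument order mirrors the convert(...) call in _test_export and the
# output path is hypothetical.
from pathlib import Path
from transformers.convert_graph_to_onnx import convert, quantize

output = Path("onnx/bert-base-cased.onnx")  # hypothetical; parent folder should be empty
convert(framework="pt", model="bert-base-cased", output=output, opset=12)

quantized = quantize(output)  # returns the path of the dynamically quantized model
assert quantized.stat().st_size < output.stat().st_size  # same size check as the test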
367
'''simple docstring''' import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class a__ ( unittest.TestCase ): """simple docstring""" def __init__(self , __lowercase , __lowercase = True , __lowercase = None , __lowercase = 32 , __lowercase = True , __lowercase = 1 / 2_55 , __lowercase = True , __lowercase = True , __lowercase = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , __lowercase = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , __lowercase = True , __lowercase=7 , __lowercase=30 , __lowercase=4_00 , __lowercase=3 , ): __lowerCAmelCase = parent __lowerCAmelCase = do_resize __lowerCAmelCase = size if size is not None else {'''shortest_edge''': 2_88} __lowerCAmelCase = size_divisor __lowerCAmelCase = do_rescale __lowerCAmelCase = rescale_factor __lowerCAmelCase = do_normalize __lowerCAmelCase = do_center_crop __lowerCAmelCase = image_mean __lowerCAmelCase = image_std __lowerCAmelCase = do_pad __lowerCAmelCase = batch_size __lowerCAmelCase = num_channels __lowerCAmelCase = min_resolution __lowerCAmelCase = max_resolution def _snake_case (self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def _snake_case (self , __lowercase , __lowercase=False ): if not batched: __lowerCAmelCase = self.size['''shortest_edge'''] __lowerCAmelCase = image_inputs[0] if isinstance(__lowercase , Image.Image ): __lowerCAmelCase , __lowerCAmelCase = image.size else: __lowerCAmelCase , __lowerCAmelCase = image.shape[1], image.shape[2] __lowerCAmelCase = size / min(__lowercase , __lowercase ) if h < w: __lowerCAmelCase , __lowerCAmelCase = size, scale * w else: __lowerCAmelCase , __lowerCAmelCase = scale * h, size __lowerCAmelCase = int((13_33 / 8_00) * size ) if max(__lowercase , __lowercase ) > max_size: __lowerCAmelCase = max_size / max(__lowercase , __lowercase ) __lowerCAmelCase = newh * scale __lowerCAmelCase = neww * scale __lowerCAmelCase , __lowerCAmelCase = int(newh + 0.5 ), int(neww + 0.5 ) __lowerCAmelCase , __lowerCAmelCase = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: __lowerCAmelCase = [] for image in image_inputs: __lowerCAmelCase , __lowerCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __lowerCAmelCase = max(__lowercase , key=lambda __lowercase : item[0] )[0] __lowerCAmelCase = max(__lowercase , key=lambda __lowercase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Any = BridgeTowerImageProcessor if is_vision_available() else None def _snake_case (self ): __lowerCAmelCase = BridgeTowerImageProcessingTester(self ) @property def _snake_case (self ): return self.image_processor_tester.prepare_image_processor_dict() def _snake_case (self ): __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowercase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowercase 
, '''image_std''' ) ) self.assertTrue(hasattr(__lowercase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowercase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowercase , '''size''' ) ) self.assertTrue(hasattr(__lowercase , '''size_divisor''' ) ) def _snake_case (self ): pass def _snake_case (self ): # Initialize image processor __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , Image.Image ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case (self ): # Initialize image processor __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , np.ndarray ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case (self ): # Initialize image processor __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , torch.Tensor ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
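# Worked example of the shortest-edge resize arithmetic in get_expected_values above;
# the input resolution is chosen only for illustration.
size, size_divisor = 288, 32
h, w = 300, 500                      # hypothetical input image
scale = size / min(h, w)             # 288 / 300 = 0.96, pin the shorter side to 288
newh, neww = size, scale * w         # 288, 480
max_size = int((1333 / 800) * size)  # 479
if max(newh, neww) > max_size:       # 480 > 479, so rescale to fit
    s = max_size / max(newh, neww)
    newh, neww = newh * s, neww * s
newh, neww = int(newh + 0.5), int(neww + 0.5)  # 287, 479
newh = newh // size_divisor * size_divisor     # 256
neww = neww // size_divisor * size_divisor     # 448
print(newh, neww)                    # both snapped down to multiples of 32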
9
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig _UpperCAmelCase : Any = { """google/tapas-base-finetuned-sqa""": ( """https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json""" ), """google/tapas-base-finetuned-wtq""": ( """https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json""" ), """google/tapas-base-finetuned-wikisql-supervised""": ( """https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json""" ), """google/tapas-base-finetuned-tabfact""": ( """https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json""" ), } class a__ ( _UpperCamelCase ): """simple docstring""" __UpperCamelCase : int = 'tapas' def __init__(self , __lowercase=3_05_22 , __lowercase=7_68 , __lowercase=12 , __lowercase=12 , __lowercase=30_72 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=10_24 , __lowercase=[3, 2_56, 2_56, 2, 2_56, 2_56, 10] , __lowercase=0.0_2 , __lowercase=1e-12 , __lowercase=0 , __lowercase=1_0.0 , __lowercase=0 , __lowercase=1.0 , __lowercase=None , __lowercase=1.0 , __lowercase=False , __lowercase=None , __lowercase=1.0 , __lowercase=1.0 , __lowercase=False , __lowercase=False , __lowercase="ratio" , __lowercase=None , __lowercase=None , __lowercase=64 , __lowercase=32 , __lowercase=False , __lowercase=True , __lowercase=False , __lowercase=False , __lowercase=True , __lowercase=False , __lowercase=None , __lowercase=None , **__lowercase , ): super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = hidden_act __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = type_vocab_sizes __lowerCAmelCase = initializer_range __lowerCAmelCase = layer_norm_eps # Fine-tuning task hyperparameters __lowerCAmelCase = positive_label_weight __lowerCAmelCase = num_aggregation_labels __lowerCAmelCase = aggregation_loss_weight __lowerCAmelCase = use_answer_as_supervision __lowerCAmelCase = answer_loss_importance __lowerCAmelCase = use_normalized_answer_loss __lowerCAmelCase = huber_loss_delta __lowerCAmelCase = temperature __lowerCAmelCase = aggregation_temperature __lowerCAmelCase = use_gumbel_for_cells __lowerCAmelCase = use_gumbel_for_aggregation __lowerCAmelCase = average_approximation_function __lowerCAmelCase = cell_selection_preference __lowerCAmelCase = answer_loss_cutoff __lowerCAmelCase = max_num_rows __lowerCAmelCase = max_num_columns __lowerCAmelCase = average_logits_per_cell __lowerCAmelCase = select_one_column __lowerCAmelCase = allow_empty_column_selection __lowerCAmelCase = init_cell_selection_weights_to_zero __lowerCAmelCase = reset_position_index_per_cell __lowerCAmelCase = disable_per_token_loss # Aggregation hyperparameters __lowerCAmelCase = aggregation_labels __lowerCAmelCase = no_aggregation_label_index if isinstance(self.aggregation_labels , _UpperCAmelCase ): __lowerCAmelCase = {int(_UpperCAmelCase ): v for k, v in aggregation_labels.items()}
368
'''simple docstring''' # Imports import numpy as np class a__ : """simple docstring""" def __init__(self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ): self.set_matricies(red=__lowercase , green=__lowercase , blue=__lowercase , red_edge=__lowercase , nir=__lowercase ) def _snake_case (self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ): if red is not None: __lowerCAmelCase = red if green is not None: __lowerCAmelCase = green if blue is not None: __lowerCAmelCase = blue if red_edge is not None: __lowerCAmelCase = red_edge if nir is not None: __lowerCAmelCase = nir return True def _snake_case (self , __lowercase="" , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ): self.set_matricies(red=__lowercase , green=__lowercase , blue=__lowercase , red_edge=__lowercase , nir=__lowercase ) __lowerCAmelCase = { '''ARVI2''': self.arvaa, '''CCCI''': self.ccci, '''CVI''': self.cvi, '''GLI''': self.gli, '''NDVI''': self.ndvi, '''BNDVI''': self.bndvi, '''redEdgeNDVI''': self.red_edge_ndvi, '''GNDVI''': self.gndvi, '''GBNDVI''': self.gbndvi, '''GRNDVI''': self.grndvi, '''RBNDVI''': self.rbndvi, '''PNDVI''': self.pndvi, '''ATSAVI''': self.atsavi, '''BWDRVI''': self.bwdrvi, '''CIgreen''': self.ci_green, '''CIrededge''': self.ci_rededge, '''CI''': self.ci, '''CTVI''': self.ctvi, '''GDVI''': self.gdvi, '''EVI''': self.evi, '''GEMI''': self.gemi, '''GOSAVI''': self.gosavi, '''GSAVI''': self.gsavi, '''Hue''': self.hue, '''IVI''': self.ivi, '''IPVI''': self.ipvi, '''I''': self.i, '''RVI''': self.rvi, '''MRVI''': self.mrvi, '''MSAVI''': self.m_savi, '''NormG''': self.norm_g, '''NormNIR''': self.norm_nir, '''NormR''': self.norm_r, '''NGRDI''': self.ngrdi, '''RI''': self.ri, '''S''': self.s, '''IF''': self._if, '''DVI''': self.dvi, '''TVI''': self.tvi, '''NDRE''': self.ndre, } try: return funcs[index]() except KeyError: print('''Index not in the list!''' ) return False def _snake_case (self ): return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red))) def _snake_case (self ): return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def _snake_case (self ): return self.nir * (self.red / (self.green**2)) def _snake_case (self ): return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def _snake_case (self ): return (self.nir - self.red) / (self.nir + self.red) def _snake_case (self ): return (self.nir - self.blue) / (self.nir + self.blue) def _snake_case (self ): return (self.redEdge - self.red) / (self.redEdge + self.red) def _snake_case (self ): return (self.nir - self.green) / (self.nir + self.green) def _snake_case (self ): return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def _snake_case (self ): return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def _snake_case (self ): return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def _snake_case (self ): return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def _snake_case (self , __lowercase=0.0_8 , __lowercase=1.2_2 , __lowercase=0.0_3 ): return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def _snake_case (self ): return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def _snake_case (self ): return (self.nir / 
self.green) - 1 def _snake_case (self ): return (self.nir / self.redEdge) - 1 def _snake_case (self ): return (self.red - self.blue) / self.red def _snake_case (self ): __lowerCAmelCase = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def _snake_case (self ): return self.nir - self.green def _snake_case (self ): return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def _snake_case (self ): __lowerCAmelCase = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red) def _snake_case (self , __lowercase=0.1_6 ): return (self.nir - self.green) / (self.nir + self.green + y) def _snake_case (self , __lowercase=0.5 ): return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def _snake_case (self ): return np.arctan( ((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) ) def _snake_case (self , __lowercase=None , __lowercase=None ): return (self.nir - b) / (a * self.red) def _snake_case (self ): return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def _snake_case (self ): return (self.red + self.green + self.blue) / 3_0.5 def _snake_case (self ): return self.nir / self.red def _snake_case (self ): return (self.rvi() - 1) / (self.rvi() + 1) def _snake_case (self ): return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def _snake_case (self ): return self.green / (self.nir + self.red + self.green) def _snake_case (self ): return self.nir / (self.nir + self.red + self.green) def _snake_case (self ): return self.red / (self.nir + self.red + self.green) def _snake_case (self ): return (self.green - self.red) / (self.green + self.red) def _snake_case (self ): return (self.red - self.green) / (self.red + self.green) def _snake_case (self ): __lowerCAmelCase = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) __lowerCAmelCase = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def _snake_case (self ): return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def _snake_case (self ): return self.nir / self.red def _snake_case (self ): return (self.ndvi() + 0.5) ** (1 / 2) def _snake_case (self ): return (self.nir - self.redEdge) / (self.nir + self.redEdge)
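# Usage sketch for the vegetation-index class above with synthetic bands; assumes it
# keeps its upstream name IndexCalculation and keyword constructor (red=..., nir=...)
# with a calculation(index) dispatcher, as the funcs table suggests.
import numpy as np

red = np.array([[0.1, 0.2], [0.3, 0.4]])
nir = np.array([[0.5, 0.6], [0.7, 0.8]])
cl = IndexCalculation(red=red, nir=nir)
print(cl.calculation("NDVI"))  # elementwise (nir - red) / (nir + red)
print(cl.calculation("RVI"))   # elementwise nir / red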
9
0
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
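# Sketch of the rope_scaling contract enforced above: a dict with exactly a "type"
# in {"linear", "dynamic"} and a float "factor" greater than 1.0.
from transformers import LlamaConfig

config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation

try:
    LlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})  # factor must be > 1
except ValueError as err:
    print(err)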
369
'''simple docstring''' from math import sqrt def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and ( number >= 0 ), "'number' must been an int and positive" __lowerCAmelCase = True # 0 and 1 are none primes. if number <= 1: __lowerCAmelCase = False for divisor in range(2, int(round(sqrt(lowerCamelCase))) + 1): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: __lowerCAmelCase = False break # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'status' must been from type bool" return status def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N __lowerCAmelCase = list(range(2, n + 1)) __lowerCAmelCase = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(lowerCamelCase)): for j in range(i + 1, len(lowerCamelCase)): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): __lowerCAmelCase = 0 # filters actual prime numbers. __lowerCAmelCase = [x for x in begin_list if x != 0] # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type list" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n > 2), "'N' must been an int and > 2" __lowerCAmelCase = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2, n + 1): if is_prime(lowerCamelCase): ans.append(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type list" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and number >= 0, "'number' must been an int and >= 0" __lowerCAmelCase = [] # this list will be returns of the function. # potential prime number factors. 
__lowerCAmelCase = 2 __lowerCAmelCase = number if number == 0 or number == 1: ans.append(lowerCamelCase) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(lowerCamelCase): while quotient != 1: if is_prime(lowerCamelCase) and (quotient % factor == 0): ans.append(lowerCamelCase) quotient /= factor else: factor += 1 else: ans.append(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type list" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and ( number >= 0 ), "'number' bust been an int and >= 0" __lowerCAmelCase = 0 # prime factorization of 'number' __lowerCAmelCase = prime_factorization(lowerCamelCase) __lowerCAmelCase = max(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type int" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and ( number >= 0 ), "'number' bust been an int and >= 0" __lowerCAmelCase = 0 # prime factorization of 'number' __lowerCAmelCase = prime_factorization(lowerCamelCase) __lowerCAmelCase = min(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type int" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase), "'number' must been an int" assert isinstance(number % 2 == 0, lowerCamelCase), "compare bust been from type bool" return number % 2 == 0 def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase), "'number' must been an int" assert isinstance(number % 2 != 0, lowerCamelCase), "compare bust been from type bool" return number % 2 != 0 def __magic_name__( lowerCamelCase): assert ( isinstance(lowerCamelCase, lowerCamelCase) and (number > 2) and is_even(lowerCamelCase) ), "'number' must been an int, even and > 2" __lowerCAmelCase = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' __lowerCAmelCase = get_prime_numbers(lowerCamelCase) __lowerCAmelCase = len(lowerCamelCase) # run variable for while-loops. __lowerCAmelCase = 0 __lowerCAmelCase = None # exit variable. for break up the loops __lowerCAmelCase = True while i < len_pn and loop: __lowerCAmelCase = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: __lowerCAmelCase = False ans.append(prime_numbers[i]) ans.append(prime_numbers[j]) j += 1 i += 1 # precondition assert ( isinstance(lowerCamelCase, lowerCamelCase) and (len(lowerCamelCase) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0]) and is_prime(ans[1]) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( isinstance(lowerCamelCase, lowerCamelCase) and isinstance(lowerCamelCase, lowerCamelCase) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." __lowerCAmelCase = 0 while numbera != 0: __lowerCAmelCase = numbera % numbera __lowerCAmelCase = numbera __lowerCAmelCase = rest # precondition assert isinstance(lowerCamelCase, lowerCamelCase) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( isinstance(lowerCamelCase, lowerCamelCase) and isinstance(lowerCamelCase, lowerCamelCase) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." 
__lowerCAmelCase = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' __lowerCAmelCase = prime_factorization(lowerCamelCase) __lowerCAmelCase = prime_factorization(lowerCamelCase) elif numbera == 1 or numbera == 1: __lowerCAmelCase = [] __lowerCAmelCase = [] __lowerCAmelCase = max(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = 0 __lowerCAmelCase = 0 __lowerCAmelCase = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: __lowerCAmelCase = prime_fac_a.count(lowerCamelCase) __lowerCAmelCase = prime_fac_a.count(lowerCamelCase) for _ in range(max(lowerCamelCase, lowerCamelCase)): ans *= n else: __lowerCAmelCase = prime_fac_a.count(lowerCamelCase) for _ in range(lowerCamelCase): ans *= n done.append(lowerCamelCase) # iterates through primeFac2 for n in prime_fac_a: if n not in done: __lowerCAmelCase = prime_fac_a.count(lowerCamelCase) for _ in range(lowerCamelCase): ans *= n done.append(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'number' must been a positive int" __lowerCAmelCase = 0 __lowerCAmelCase = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(lowerCamelCase): ans += 1 # precondition assert isinstance(lowerCamelCase, lowerCamelCase) and is_prime( lowerCamelCase), "'ans' must been a prime number and from type int" return ans def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( is_prime(lowerCamelCase) and is_prime(lowerCamelCase) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" __lowerCAmelCase = p_number_a + 1 # jump to the next number __lowerCAmelCase = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(lowerCamelCase): number += 1 while number < p_number_a: ans.append(lowerCamelCase) number += 1 # fetch the next prime number. while not is_prime(lowerCamelCase): number += 1 # precondition assert ( isinstance(lowerCamelCase, lowerCamelCase) and ans[0] != p_number_a and ans[len(lowerCamelCase) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 1), "'n' must been int and >= 1" __lowerCAmelCase = [] # will be returned. 
for divisor in range(1, n + 1): if n % divisor == 0: ans.append(lowerCamelCase) # precondition assert ans[0] == 1 and ans[len(lowerCamelCase) - 1] == n, "Error in function getDivisiors(...)" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and ( number > 1 ), "'number' must been an int and >= 1" __lowerCAmelCase = get_divisors(lowerCamelCase) # precondition assert ( isinstance(lowerCamelCase, lowerCamelCase) and (divisors[0] == 1) and (divisors[len(lowerCamelCase) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1]) == number def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( isinstance(lowerCamelCase, lowerCamelCase) and isinstance(lowerCamelCase, lowerCamelCase) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. __lowerCAmelCase = gcd(abs(lowerCamelCase), abs(lowerCamelCase)) # precondition assert ( isinstance(lowerCamelCase, lowerCamelCase) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'n' must been a int and >= 0" __lowerCAmelCase = 1 # this will be return. for factor in range(1, n + 1): ans *= factor return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'n' must been an int and >= 0" __lowerCAmelCase = 0 __lowerCAmelCase = 1 __lowerCAmelCase = 1 # this will be return for _ in range(n - 1): __lowerCAmelCase = ans ans += fiba __lowerCAmelCase = tmp return ans
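# A few hedged calls against the number-theory helpers above, using only the names
# the module's own internal calls rely on (is_prime, prime_factorization, gcd,
# get_divisors).
print(is_prime(97))              # True
print(prime_factorization(360))  # [2, 2, 2, 3, 3, 5]
print(gcd(54, 24))               # 6
print(get_divisors(28))          # [1, 2, 4, 7, 14, 28]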
9
0
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize, sigma, theta, lambd, gamma, psi):
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # fill each value of the kernel
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # rotate the coordinates into the filter's frame
            _x = cos_theta * px + sin_theta * py
            _y = -sin_theta * px + cos_theta * py

            # Gaussian envelope times a cosine carrier
            gabor[y, x] = np.exp(-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(
                2 * np.pi * _x / lambd + psi
            )
    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # convert the image to gray-scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
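# Quick standalone check of gabor_filter_kernel; no OpenCV needed, parameters copied
# from the __main__ block above.
kernel = gabor_filter_kernel(10, 8, theta=0, lambd=10, gamma=0, psi=0)
print(kernel.shape)  # (11, 11): an even ksize is bumped to the next odd value
print(kernel.max())  # 1.0 at the kernel center, where _x == _y == 0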
370
'''simple docstring''' import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed _UpperCAmelCase : Dict = """true""" def __magic_name__( lowerCamelCase, lowerCamelCase=8_2, lowerCamelCase=1_6): set_seed(4_2) __lowerCAmelCase = RegressionModel() __lowerCAmelCase = deepcopy(lowerCamelCase) __lowerCAmelCase = RegressionDataset(length=lowerCamelCase) __lowerCAmelCase = DataLoader(lowerCamelCase, batch_size=lowerCamelCase) model.to(accelerator.device) __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowerCamelCase, lowerCamelCase) return model, ddp_model, dataloader def __magic_name__( lowerCamelCase, lowerCamelCase=False): __lowerCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''') __lowerCAmelCase = load_dataset('''glue''', '''mrpc''', split='''validation''') def tokenize_function(lowerCamelCase): __lowerCAmelCase = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=lowerCamelCase, max_length=lowerCamelCase) return outputs with accelerator.main_process_first(): __lowerCAmelCase = dataset.map( lowerCamelCase, batched=lowerCamelCase, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], ) __lowerCAmelCase = tokenized_datasets.rename_column('''label''', '''labels''') def collate_fn(lowerCamelCase): if use_longest: return tokenizer.pad(lowerCamelCase, padding='''longest''', return_tensors='''pt''') return tokenizer.pad(lowerCamelCase, padding='''max_length''', max_length=1_2_8, return_tensors='''pt''') return DataLoader(lowerCamelCase, shuffle=lowerCamelCase, collate_fn=lowerCamelCase, batch_size=1_6) def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase = Accelerator(dispatch_batches=lowerCamelCase, split_batches=lowerCamelCase) __lowerCAmelCase = get_dataloader(lowerCamelCase, not dispatch_batches) __lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained( '''hf-internal-testing/mrpc-bert-base-cased''', return_dict=lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowerCamelCase, lowerCamelCase) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = [] for batch in dataloader: __lowerCAmelCase , __lowerCAmelCase = batch.values() with torch.no_grad(): __lowerCAmelCase = model(lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((logit, target)) logits_and_targets.append((logit, target)) __lowerCAmelCase , __lowerCAmelCase = [], [] for logit, targ in logits_and_targets: logits.append(lowerCamelCase) targs.append(lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = torch.cat(lowerCamelCase), torch.cat(lowerCamelCase) return logits, targs def __magic_name__( lowerCamelCase, lowerCamelCase=8_2, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=1_6): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = get_basic_setup(lowerCamelCase, lowerCamelCase, lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = generate_predictions(lowerCamelCase, lowerCamelCase, lowerCamelCase) assert ( len(lowerCamelCase) == num_samples ), 
F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowerCamelCase)}""" def __magic_name__( lowerCamelCase = False, lowerCamelCase = False): __lowerCAmelCase = evaluate.load('''glue''', '''mrpc''') __lowerCAmelCase , __lowerCAmelCase = get_mrpc_setup(lowerCamelCase, lowerCamelCase) # First do baseline __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup['''no'''] model.to(lowerCamelCase) model.eval() for batch in dataloader: batch.to(lowerCamelCase) with torch.inference_mode(): __lowerCAmelCase = model(**lowerCamelCase) __lowerCAmelCase = outputs.logits.argmax(dim=-1) metric.add_batch(predictions=lowerCamelCase, references=batch['''labels''']) __lowerCAmelCase = metric.compute() # Then do distributed __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup['''ddp'''] model.eval() for batch in dataloader: with torch.inference_mode(): __lowerCAmelCase = model(**lowerCamelCase) __lowerCAmelCase = outputs.logits.argmax(dim=-1) __lowerCAmelCase = batch['''labels'''] __lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((preds, references)) metric.add_batch(predictions=lowerCamelCase, references=lowerCamelCase) __lowerCAmelCase = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key], distributed[key]), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n""" def __magic_name__( ): __lowerCAmelCase = Accelerator(split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('''**Testing gather_for_metrics**''') for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""") test_mrpc(lowerCamelCase, lowerCamelCase) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test torch metrics**''') for split_batches in [True, False]: for dispatch_batches in [True, False]: __lowerCAmelCase = Accelerator(split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase) if accelerator.is_local_main_process: print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""") test_torch_metrics(lowerCamelCase, 9_9) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test last batch is not dropped when perfectly divisible**''') __lowerCAmelCase = Accelerator() test_torch_metrics(lowerCamelCase, 5_1_2) accelerator.state._reset_state() def __magic_name__( lowerCamelCase): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
9
0
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version _UpperCAmelCase : Union[str, Any] = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") @dataclass class a__ : """simple docstring""" __UpperCamelCase : Optional[str] = field( default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} ) __UpperCamelCase : Optional[str] = field( default=_SCREAMING_SNAKE_CASE , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) __UpperCamelCase : Optional[str] = field( default=_SCREAMING_SNAKE_CASE , metadata={'help': 'The column name of the images in the files.'} ) __UpperCamelCase : Optional[str] = field(default=_SCREAMING_SNAKE_CASE , metadata={'help': 'A folder containing the training data.'} ) __UpperCamelCase : Optional[str] = field(default=_SCREAMING_SNAKE_CASE , metadata={'help': 'A folder containing the validation data.'} ) __UpperCamelCase : Optional[float] = field( default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} ) __UpperCamelCase : Optional[int] = field( default=_SCREAMING_SNAKE_CASE , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) __UpperCamelCase : Optional[int] = field( default=_SCREAMING_SNAKE_CASE , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) def _snake_case (self ): __lowerCAmelCase = {} if self.train_dir is not None: __lowerCAmelCase = self.train_dir if self.validation_dir is not None: __lowerCAmelCase = self.validation_dir __lowerCAmelCase = data_files if data_files else None @dataclass class a__ : """simple docstring""" __UpperCamelCase : str = field( default=_SCREAMING_SNAKE_CASE , metadata={ 'help': ( 'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.' ) } , ) __UpperCamelCase : Optional[str] = field( default=_SCREAMING_SNAKE_CASE , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} ) __UpperCamelCase : Optional[str] = field( default=_SCREAMING_SNAKE_CASE , metadata={ 'help': ( 'Override some existing default config settings when a model is trained from scratch. 
Example: ' 'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index' ) } , ) __UpperCamelCase : Optional[str] = field( default=_SCREAMING_SNAKE_CASE , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} ) __UpperCamelCase : str = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) __UpperCamelCase : str = field(default=_SCREAMING_SNAKE_CASE , metadata={'help': 'Name or path of preprocessor config.'} ) __UpperCamelCase : bool = field( default=_SCREAMING_SNAKE_CASE , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) __UpperCamelCase : float = field( default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} ) __UpperCamelCase : bool = field( default=_SCREAMING_SNAKE_CASE , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} ) @dataclass class a__ ( _SCREAMING_SNAKE_CASE ): """simple docstring""" __UpperCamelCase : float = field( default=1E-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} ) def __magic_name__( lowerCamelCase): __lowerCAmelCase = torch.stack([example['''pixel_values'''] for example in examples]) return {"pixel_values": pixel_values} def __magic_name__( ): __lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith('''.json'''): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_mae''', lowerCamelCase, lowerCamelCase) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout)], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __lowerCAmelCase = training_args.get_process_log_level() logger.setLevel(lowerCamelCase) transformers.utils.logging.set_verbosity(lowerCamelCase) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}""") logger.info(F"""Training/evaluation parameters {training_args}""") # Detecting last checkpoint. 
__lowerCAmelCase = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: __lowerCAmelCase = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ '''Use --overwrite_output_dir to overcome.''') elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''') # Initialize our dataset. __lowerCAmelCase = load_dataset( data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) # If we don't have a validation split, split off a percentage of train as validation. __lowerCAmelCase = None if '''validation''' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split, lowerCamelCase) and data_args.train_val_split > 0.0: __lowerCAmelCase = ds['''train'''].train_test_split(data_args.train_val_split) __lowerCAmelCase = split['''train'''] __lowerCAmelCase = split['''test'''] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __lowerCAmelCase = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name: __lowerCAmelCase = ViTMAEConfig.from_pretrained(model_args.config_name, **lowerCamelCase) elif model_args.model_name_or_path: __lowerCAmelCase = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **lowerCamelCase) else: __lowerCAmelCase = ViTMAEConfig() logger.warning('''You are instantiating a new config instance from scratch.''') if model_args.config_overrides is not None: logger.info(F"""Overriding config: {model_args.config_overrides}""") config.update_from_string(model_args.config_overrides) logger.info(F"""New config: {config}""") # adapt config config.update( { '''mask_ratio''': model_args.mask_ratio, '''norm_pix_loss''': model_args.norm_pix_loss, }) # create image processor if model_args.image_processor_name: __lowerCAmelCase = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **lowerCamelCase) elif model_args.model_name_or_path: __lowerCAmelCase = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **lowerCamelCase) else: __lowerCAmelCase = ViTImageProcessor() # create model if model_args.model_name_or_path: __lowerCAmelCase = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path), config=lowerCamelCase, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) else: logger.info('''Training new model from scratch''') __lowerCAmelCase = ViTMAEForPreTraining(lowerCamelCase) if training_args.do_train: __lowerCAmelCase = ds['''train'''].column_names else: __lowerCAmelCase = ds['''validation'''].column_names if data_args.image_column_name is not None: __lowerCAmelCase = data_args.image_column_name elif "image" in column_names: __lowerCAmelCase = '''image''' elif "img" in column_names: 
__lowerCAmelCase = '''img''' else: __lowerCAmelCase = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: __lowerCAmelCase = image_processor.size['''shortest_edge'''] else: __lowerCAmelCase = (image_processor.size['''height'''], image_processor.size['''width''']) __lowerCAmelCase = Compose( [ Lambda(lambda lowerCamelCase: img.convert('''RGB''') if img.mode != "RGB" else img), RandomResizedCrop(lowerCamelCase, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean, std=image_processor.image_std), ]) def preprocess_images(lowerCamelCase): __lowerCAmelCase = [transforms(lowerCamelCase) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('''--do_train requires a train dataset''') if data_args.max_train_samples is not None: __lowerCAmelCase = ds['''train'''].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples)) # Set the training transforms ds["train"].set_transform(lowerCamelCase) if training_args.do_eval: if "validation" not in ds: raise ValueError('''--do_eval requires a validation dataset''') if data_args.max_eval_samples is not None: __lowerCAmelCase = ( ds['''validation'''].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples)) ) # Set the validation transforms ds["validation"].set_transform(lowerCamelCase) # Compute absolute learning rate __lowerCAmelCase = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: __lowerCAmelCase = training_args.base_learning_rate * total_train_batch_size / 2_5_6 # Initialize our trainer __lowerCAmelCase = Trainer( model=lowerCamelCase, args=lowerCamelCase, train_dataset=ds['''train'''] if training_args.do_train else None, eval_dataset=ds['''validation'''] if training_args.do_eval else None, tokenizer=lowerCamelCase, data_collator=lowerCamelCase, ) # Training if training_args.do_train: __lowerCAmelCase = None if training_args.resume_from_checkpoint is not None: __lowerCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: __lowerCAmelCase = last_checkpoint __lowerCAmelCase = trainer.train(resume_from_checkpoint=lowerCamelCase) trainer.save_model() trainer.log_metrics('''train''', train_result.metrics) trainer.save_metrics('''train''', train_result.metrics) trainer.save_state() # Evaluation if training_args.do_eval: __lowerCAmelCase = trainer.evaluate() trainer.log_metrics('''eval''', lowerCamelCase) trainer.save_metrics('''eval''', lowerCamelCase) # Write model card and (optionally) push to hub __lowerCAmelCase = { '''tasks''': '''masked-auto-encoding''', '''dataset''': data_args.dataset_name, '''tags''': ['''masked-auto-encoding'''], } if training_args.push_to_hub: trainer.push_to_hub(**lowerCamelCase) else: trainer.create_model_card(**lowerCamelCase) def __magic_name__( lowerCamelCase): main() if __name__ == "__main__": main()
371
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCAmelCase : str = logging.get_logger(__name__) _UpperCAmelCase : str = { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""", """roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""", } class a__ ( __A ): """simple docstring""" __UpperCamelCase : str = 'roberta' def __init__(self , __lowercase=5_02_65 , __lowercase=7_68 , __lowercase=12 , __lowercase=12 , __lowercase=30_72 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_12 , __lowercase=2 , __lowercase=0.0_2 , __lowercase=1e-12 , __lowercase=1 , __lowercase=0 , __lowercase=2 , __lowercase="absolute" , __lowercase=True , __lowercase=None , **__lowercase , ): super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = hidden_act __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = type_vocab_size __lowerCAmelCase = initializer_range __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = position_embedding_type __lowerCAmelCase = use_cache __lowerCAmelCase = classifier_dropout class a__ ( __A ): """simple docstring""" @property def _snake_case (self ): if self.task == "multiple-choice": __lowerCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __lowerCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
9
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCAmelCase : Any = logging.get_logger(__name__) _UpperCAmelCase : Tuple = { """facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""", """facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""", """facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""", """facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""", """facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""", """facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""", """facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""", """facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""", """facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""", } class a__ ( __A ): """simple docstring""" __UpperCamelCase : Tuple = 'xmod' def __init__(self , __lowercase=3_05_22 , __lowercase=7_68 , __lowercase=12 , __lowercase=12 , __lowercase=30_72 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_12 , __lowercase=2 , __lowercase=0.0_2 , __lowercase=1e-12 , __lowercase=1 , __lowercase=0 , __lowercase=2 , __lowercase="absolute" , __lowercase=True , __lowercase=None , __lowercase=False , __lowercase=2 , __lowercase=False , __lowercase=True , __lowercase=True , __lowercase=("en_XX",) , __lowercase=None , **__lowercase , ): super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = hidden_act __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = type_vocab_size __lowerCAmelCase = initializer_range __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = position_embedding_type __lowerCAmelCase = use_cache __lowerCAmelCase = classifier_dropout __lowerCAmelCase = pre_norm __lowerCAmelCase = adapter_reduction_factor __lowerCAmelCase = adapter_layer_norm __lowerCAmelCase = adapter_reuse_layer_norm __lowerCAmelCase = ln_before_adapter __lowerCAmelCase = list(__lowercase ) __lowerCAmelCase = default_language class a__ ( __A ): """simple docstring""" @property def _snake_case (self ): if self.task == "multiple-choice": __lowerCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __lowerCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
350
'''simple docstring''' import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase = old_name if "patch_embed" in old_name: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = old_name.split('''.''') if layer == "0": __lowerCAmelCase = old_name.replace('''0''', '''convolution1''') elif layer == "1": __lowerCAmelCase = old_name.replace('''1''', '''batchnorm_before''') elif layer == "3": __lowerCAmelCase = old_name.replace('''3''', '''convolution2''') else: __lowerCAmelCase = old_name.replace('''4''', '''batchnorm_after''') if "network" in old_name and re.search(r'''\d\.\d''', lowerCamelCase): __lowerCAmelCase = r'''\b\d{2}\b''' if bool(re.search(lowerCamelCase, lowerCamelCase)): __lowerCAmelCase = re.search(r'''\d\.\d\d.''', lowerCamelCase).group() else: __lowerCAmelCase = re.search(r'''\d\.\d.''', lowerCamelCase).group() if int(match[0]) < 6: __lowerCAmelCase = old_name.replace(lowerCamelCase, '''''') __lowerCAmelCase = trimmed_name.replace('''network''', match[0] + '''.meta4D_layers.blocks.''' + match[2:-1]) __lowerCAmelCase = '''intermediate_stages.''' + trimmed_name else: __lowerCAmelCase = old_name.replace(lowerCamelCase, '''''') if int(match[2]) < num_meta4D_last_stage: __lowerCAmelCase = trimmed_name.replace('''network''', '''meta4D_layers.blocks.''' + match[2]) else: __lowerCAmelCase = str(int(match[2]) - num_meta4D_last_stage) __lowerCAmelCase = trimmed_name.replace('''network''', '''meta3D_layers.blocks.''' + layer_index) if "norm1" in old_name: __lowerCAmelCase = trimmed_name.replace('''norm1''', '''layernorm1''') elif "norm2" in old_name: __lowerCAmelCase = trimmed_name.replace('''norm2''', '''layernorm2''') elif "fc1" in old_name: __lowerCAmelCase = trimmed_name.replace('''fc1''', '''linear_in''') elif "fc2" in old_name: __lowerCAmelCase = trimmed_name.replace('''fc2''', '''linear_out''') __lowerCAmelCase = '''last_stage.''' + trimmed_name elif "network" in old_name and re.search(r'''.\d.''', lowerCamelCase): __lowerCAmelCase = old_name.replace('''network''', '''intermediate_stages''') if "fc" in new_name: __lowerCAmelCase = new_name.replace('''fc''', '''convolution''') elif ("norm1" in new_name) and ("layernorm1" not in new_name): __lowerCAmelCase = new_name.replace('''norm1''', '''batchnorm_before''') elif ("norm2" in new_name) and ("layernorm2" not in new_name): __lowerCAmelCase = new_name.replace('''norm2''', '''batchnorm_after''') if "proj" in new_name: __lowerCAmelCase = new_name.replace('''proj''', '''projection''') if "dist_head" in new_name: __lowerCAmelCase = new_name.replace('''dist_head''', '''distillation_classifier''') elif "head" in new_name: __lowerCAmelCase = new_name.replace('''head''', '''classifier''') elif "patch_embed" in new_name: __lowerCAmelCase = '''efficientformer.''' + new_name elif new_name == "norm.weight" or new_name == "norm.bias": __lowerCAmelCase = new_name.replace('''norm''', '''layernorm''') __lowerCAmelCase = '''efficientformer.''' + new_name else: __lowerCAmelCase = '''efficientformer.encoder.''' + new_name return new_name def __magic_name__( lowerCamelCase, lowerCamelCase): for key in checkpoint.copy().keys(): 
__lowerCAmelCase = checkpoint.pop(lowerCamelCase) __lowerCAmelCase = val return checkpoint def __magic_name__( ): __lowerCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __lowerCAmelCase = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase).raw) return image def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = torch.load(lowerCamelCase, map_location='''cpu''')['''model'''] __lowerCAmelCase = EfficientFormerConfig.from_json_file(lowerCamelCase) __lowerCAmelCase = EfficientFormerForImageClassificationWithTeacher(lowerCamelCase) __lowerCAmelCase = '''_'''.join(checkpoint_path.split('''/''')[-1].split('''.''')[0].split('''_''')[:-1]) __lowerCAmelCase = config.depths[-1] - config.num_metaad_blocks + 1 __lowerCAmelCase = convert_torch_checkpoint(lowerCamelCase, lowerCamelCase) model.load_state_dict(lowerCamelCase) model.eval() __lowerCAmelCase = { '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } # prepare image __lowerCAmelCase = prepare_img() __lowerCAmelCase = 2_5_6 __lowerCAmelCase = 2_2_4 __lowerCAmelCase = EfficientFormerImageProcessor( size={'''shortest_edge''': image_size}, crop_size={'''height''': crop_size, '''width''': crop_size}, resample=pillow_resamplings['''bicubic'''], ) __lowerCAmelCase = processor(images=lowerCamelCase, return_tensors='''pt''').pixel_values # original processing pipeline __lowerCAmelCase = Compose( [ Resize(lowerCamelCase, interpolation=pillow_resamplings['''bicubic''']), CenterCrop(lowerCamelCase), ToTensor(), Normalize(lowerCamelCase, lowerCamelCase), ]) __lowerCAmelCase = image_transforms(lowerCamelCase).unsqueeze(0) assert torch.allclose(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = model(lowerCamelCase) __lowerCAmelCase = outputs.logits __lowerCAmelCase = (1, 1_0_0_0) if "l1" in model_name: __lowerCAmelCase = torch.Tensor( [-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28]) assert torch.allclose(logits[0, :1_0], lowerCamelCase, atol=1E-3) assert logits.shape == expected_shape elif "l3" in model_name: __lowerCAmelCase = torch.Tensor( [-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27]) assert torch.allclose(logits[0, :1_0], lowerCamelCase, atol=1E-3) assert logits.shape == expected_shape elif "l7" in model_name: __lowerCAmelCase = torch.Tensor( [-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78]) assert logits.shape == expected_shape else: raise ValueError( F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""") # Save Checkpoints Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase) model.save_pretrained(lowerCamelCase) print(F"""Checkpoint successfuly converted. 
Model saved at {pytorch_dump_path}""") processor.save_pretrained(lowerCamelCase) print(F"""Processor successfuly saved at {pytorch_dump_path}""") if push_to_hub: print('''Pushing model to the hub...''') model.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add model''', use_temp_dir=lowerCamelCase, ) processor.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add image processor''', use_temp_dir=lowerCamelCase, ) if __name__ == "__main__": _UpperCAmelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--pytorch_model_path""", default=None, type=str, required=True, help="""Path to EfficientFormer pytorch checkpoint.""", ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The json file for EfficientFormer model config.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) parser.set_defaults(push_to_hub=True) _UpperCAmelCase : List[str] = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
9
0
"""Project Euler problem 20: find the sum of the digits in the number 100!."""
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the decimal digits of num! (factorial of num)."""
    return sum(int(digit) for digit in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
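# Quick sanity checks for solution() above; the expected values follow from
# 10! = 3628800 (digit sum 3+6+2+8+8+0+0 = 27) and the published
# Project Euler 20 result for 100!.
assert solution(10) == 27
assert solution(100) == 648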
351
'''simple docstring''' from __future__ import annotations import math def __magic_name__( lowerCamelCase, lowerCamelCase): if len(lowerCamelCase) != 2 or len(a[0]) != 2 or len(lowerCamelCase) != 2 or len(b[0]) != 2: raise Exception('''Matrices are not 2x2''') __lowerCAmelCase = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def __magic_name__( lowerCamelCase, lowerCamelCase): return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))] for row in range(len(lowerCamelCase)) ] def __magic_name__( lowerCamelCase, lowerCamelCase): return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))] for row in range(len(lowerCamelCase)) ] def __magic_name__( lowerCamelCase): if len(lowerCamelCase) % 2 != 0 or len(a[0]) % 2 != 0: raise Exception('''Odd matrices are not supported!''') __lowerCAmelCase = len(lowerCamelCase) __lowerCAmelCase = matrix_length // 2 __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase, lowerCamelCase)] for i in range(lowerCamelCase)] __lowerCAmelCase = [ [a[i][j] for j in range(lowerCamelCase, lowerCamelCase)] for i in range(lowerCamelCase, lowerCamelCase) ] __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase)] for i in range(lowerCamelCase)] __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase)] for i in range(lowerCamelCase, lowerCamelCase)] return top_left, top_right, bot_left, bot_right def __magic_name__( lowerCamelCase): return len(lowerCamelCase), len(matrix[0]) def __magic_name__( lowerCamelCase): print('''\n'''.join(str(lowerCamelCase) for line in matrix)) def __magic_name__( lowerCamelCase, lowerCamelCase): if matrix_dimensions(lowerCamelCase) == (2, 2): return default_matrix_multiplication(lowerCamelCase, lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase) __lowerCAmelCase = actual_strassen(lowerCamelCase, matrix_subtraction(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase) __lowerCAmelCase = actual_strassen(lowerCamelCase, matrix_subtraction(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase), lowerCamelCase) __lowerCAmelCase = matrix_addition(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = matrix_addition(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase), lowerCamelCase) # construct the new matrix from our 4 quadrants __lowerCAmelCase = [] for i in range(len(lowerCamelCase)): new_matrix.append(top_left[i] + top_right[i]) for i in range(len(lowerCamelCase)): new_matrix.append(bot_left[i] + bot_right[i]) return 
new_matrix def __magic_name__( lowerCamelCase, lowerCamelCase): if matrix_dimensions(lowerCamelCase)[1] != matrix_dimensions(lowerCamelCase)[0]: __lowerCAmelCase = ( '''Unable to multiply these matrices, please check the dimensions.\n''' F"""Matrix A: {matrixa}\n""" F"""Matrix B: {matrixa}""" ) raise Exception(lowerCamelCase) __lowerCAmelCase = matrix_dimensions(lowerCamelCase) __lowerCAmelCase = matrix_dimensions(lowerCamelCase) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] __lowerCAmelCase = max(*lowerCamelCase, *lowerCamelCase) __lowerCAmelCase = int(math.pow(2, math.ceil(math.loga(lowerCamelCase)))) __lowerCAmelCase = matrixa __lowerCAmelCase = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0, lowerCamelCase): if i < dimensiona[0]: for _ in range(dimensiona[1], lowerCamelCase): new_matrixa[i].append(0) else: new_matrixa.append([0] * maxim) if i < dimensiona[0]: for _ in range(dimensiona[1], lowerCamelCase): new_matrixa[i].append(0) else: new_matrixa.append([0] * maxim) __lowerCAmelCase = actual_strassen(lowerCamelCase, lowerCamelCase) # Removing the additional zeros for i in range(0, lowerCamelCase): if i < dimensiona[0]: for _ in range(dimensiona[1], lowerCamelCase): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": _UpperCAmelCase : List[str] = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] _UpperCAmelCase : Optional[Any] = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]] print(strassen(matrixa, matrixa))
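# Note on the Strassen implementation above: each recursion replaces the 8
# block multiplications of naive divide-and-conquer with 7, giving the
# recurrence T(n) = 7 T(n/2) + O(n^2), i.e. O(n^log2(7)) ~ O(n^2.81) instead
# of O(n^3). The padding step rounds both operands up to the next
# power-of-two square so every quadrant split is exact; the extra zeros are
# stripped from the result afterwards.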
9
0
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType _UpperCAmelCase : str = logging.get_logger(__name__) _UpperCAmelCase : Dict = { """openai/imagegpt-small""": """""", """openai/imagegpt-medium""": """""", """openai/imagegpt-large""": """""", } class a__ ( __A ): """simple docstring""" __UpperCamelCase : Optional[Any] = 'imagegpt' __UpperCamelCase : Dict = ['past_key_values'] __UpperCamelCase : Optional[int] = { 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__(self , __lowercase=5_12 + 1 , __lowercase=32 * 32 , __lowercase=5_12 , __lowercase=24 , __lowercase=8 , __lowercase=None , __lowercase="quick_gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=1e-5 , __lowercase=0.0_2 , __lowercase=True , __lowercase=True , __lowercase=False , __lowercase=False , __lowercase=False , **__lowercase , ): __lowerCAmelCase = vocab_size __lowerCAmelCase = n_positions __lowerCAmelCase = n_embd __lowerCAmelCase = n_layer __lowerCAmelCase = n_head __lowerCAmelCase = n_inner __lowerCAmelCase = activation_function __lowerCAmelCase = resid_pdrop __lowerCAmelCase = embd_pdrop __lowerCAmelCase = attn_pdrop __lowerCAmelCase = layer_norm_epsilon __lowerCAmelCase = initializer_range __lowerCAmelCase = scale_attn_weights __lowerCAmelCase = use_cache __lowerCAmelCase = scale_attn_by_inverse_layer_idx __lowerCAmelCase = reorder_and_upcast_attn __lowerCAmelCase = tie_word_embeddings super().__init__(tie_word_embeddings=__lowercase , **__lowercase ) class a__ ( __A ): """simple docstring""" @property def _snake_case (self ): return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ] ) def _snake_case (self , __lowercase , __lowercase = 1 , __lowercase = -1 , __lowercase = False , __lowercase = None , __lowercase = 3 , __lowercase = 32 , __lowercase = 32 , ): __lowerCAmelCase = self._generate_dummy_images(__lowercase , __lowercase , __lowercase , __lowercase ) __lowerCAmelCase = dict(preprocessor(images=__lowercase , return_tensors=__lowercase ) ) return inputs
352
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): __lowerCAmelCase = tempfile.mkdtemp() # fmt: off __lowerCAmelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) __lowerCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowerCAmelCase = {'''unk_token''': '''<unk>'''} __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowercase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowercase ) ) __lowerCAmelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } __lowerCAmelCase = os.path.join(self.tmpdirname , __lowercase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowercase , __lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , **__lowercase ): return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self ): shutil.rmtree(self.tmpdirname ) def _snake_case (self ): __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCAmelCase = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase ) __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowercase ) 
self.assertIsInstance(processor_fast.tokenizer , __lowercase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowercase ) self.assertIsInstance(processor_fast.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCAmelCase = self.get_image_processor(do_normalize=__lowercase , padding_value=1.0 ) __lowerCAmelCase = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowercase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = image_processor(__lowercase , return_tensors='''np''' ) __lowerCAmelCase = processor(images=__lowercase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = processor(text=__lowercase ) __lowerCAmelCase = tokenizer(__lowercase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(text=__lowercase , images=__lowercase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCAmelCase = processor.batch_decode(__lowercase ) __lowerCAmelCase = tokenizer.batch_decode(__lowercase ) self.assertListEqual(__lowercase , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(text=__lowercase , images=__lowercase ) self.assertListEqual(list(inputs.keys() ) 
, processor.model_input_names )
9
0
"""Deprecation helper (diffusers' `deprecate`): pops deprecated kwargs, emits
`FutureWarning`s, and raises once a feature is past its removal version."""
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        # Hard-fail if the deprecation window has already closed.
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    # Any kwargs left over were never registered for deprecation: surface them
    # as the TypeError the caller would have raised itself.
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number - 1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
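# Minimal usage sketch for `deprecate` above. The `resize` function and its
# `scale` kwarg are invented purely for illustration: a caller that still
# accepts the old kwarg pops it via `take_from=kwargs`, which warns and
# returns the old value so it can be mapped onto the new name.
def resize(image, new_scale=1.0, **kwargs):
    old_scale = deprecate("scale", "1.0.0", "Use `new_scale` instead.", take_from=kwargs)
    if old_scale is not None:
        new_scale = old_scale
    return image, new_scale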
353
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class a__ ( __A ): """simple docstring""" def __init__(self , __lowercase , __lowercase=None , __lowercase=None , __lowercase=0 ): __lowerCAmelCase = 1.0 if scale is None else scale __lowerCAmelCase = 0.0 if loc is None else loc super().__init__(__lowercase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__lowercase )] ) @property def _snake_case (self ): return self.base_dist.mean * self.scale + self.loc @property def _snake_case (self ): return self.base_dist.variance * self.scale**2 @property def _snake_case (self ): return self.variance.sqrt() class a__ ( nn.Module ): """simple docstring""" def __init__(self , __lowercase , __lowercase , __lowercase , **__lowercase ): super().__init__(**__lowercase ) __lowerCAmelCase = args_dim __lowerCAmelCase = nn.ModuleList([nn.Linear(__lowercase , __lowercase ) for dim in args_dim.values()] ) __lowerCAmelCase = domain_map def _snake_case (self , __lowercase ): __lowerCAmelCase = [proj(__lowercase ) for proj in self.proj] return self.domain_map(*__lowercase ) class a__ ( nn.Module ): """simple docstring""" def __init__(self , __lowercase ): super().__init__() __lowerCAmelCase = function def _snake_case (self , __lowercase , *__lowercase ): return self.function(__lowercase , *__lowercase ) class a__ : """simple docstring""" __UpperCamelCase : type __UpperCamelCase : int __UpperCamelCase : Dict[str, int] def __init__(self , __lowercase = 1 ): __lowerCAmelCase = dim __lowerCAmelCase = {k: dim * self.args_dim[k] for k in self.args_dim} def _snake_case (self , __lowercase ): if self.dim == 1: return self.distribution_class(*__lowercase ) else: return Independent(self.distribution_class(*__lowercase ) , 1 ) def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None , ): __lowerCAmelCase = self._base_distribution(__lowercase ) if loc is None and scale is None: return distr else: return AffineTransformed(__lowercase , loc=__lowercase , scale=__lowercase , event_dim=self.event_dim ) @property def _snake_case (self ): return () if self.dim == 1 else (self.dim,) @property def _snake_case (self ): return len(self.event_shape ) @property def _snake_case (self ): return 0.0 def _snake_case (self , __lowercase ): return ParameterProjection( in_features=__lowercase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def _snake_case (self , *__lowercase ): raise NotImplementedError() @staticmethod def _snake_case (__lowercase ): return (x + torch.sqrt(torch.square(__lowercase ) + 4.0 )) / 2.0 class a__ ( __A ): """simple docstring""" __UpperCamelCase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} __UpperCamelCase : type = StudentT @classmethod def _snake_case (cls , __lowercase , __lowercase , __lowercase ): __lowerCAmelCase = cls.squareplus(__lowercase ).clamp_min(torch.finfo(scale.dtype ).eps ) __lowerCAmelCase = 2.0 + cls.squareplus(__lowercase ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class a__ ( __A ): """simple docstring""" __UpperCamelCase : Dict[str, int] = {"loc": 1, "scale": 1} __UpperCamelCase : type = Normal @classmethod def _snake_case (cls , __lowercase , __lowercase ): __lowerCAmelCase = cls.squareplus(__lowercase ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class a__ ( __A ): """simple 
docstring""" __UpperCamelCase : Dict[str, int] = {"total_count": 1, "logits": 1} __UpperCamelCase : type = NegativeBinomial @classmethod def _snake_case (cls , __lowercase , __lowercase ): __lowerCAmelCase = cls.squareplus(__lowercase ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def _snake_case (self , __lowercase ): __lowerCAmelCase , __lowerCAmelCase = distr_args if self.dim == 1: return self.distribution_class(total_count=__lowercase , logits=__lowercase ) else: return Independent(self.distribution_class(total_count=__lowercase , logits=__lowercase ) , 1 ) def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None ): __lowerCAmelCase , __lowerCAmelCase = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
9
0
"""Image-captioning tool built on BLIP (a `PipelineTool`)."""
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
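# Hypothetical usage sketch for the tool above: `PipelineTool` instances are
# callable (encode -> forward -> decode), the checkpoint downloads on first
# use, and "photo.jpg" is a placeholder path.
if __name__ == "__main__":
    from PIL import Image

    captioner = ImageCaptioningTool()
    print(captioner(Image.open("photo.jpg")))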
354
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Document question answering tool built on Donut (a `PipelineTool`)."""
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        # Donut expects the question wrapped in its DocVQA prompt format.
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
0
"""Prefix-sum array: O(n) build, O(1) range-sum queries, and an O(n)
check for whether any contiguous subarray sums to a target value."""


class PrefixSum:
    def __init__(self, array: list) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of array[start..end], inclusive."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
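# Minimal usage sketch for PrefixSum above (values chosen for illustration):
# get_sum is O(1) per query after the O(n) prefix pass, and contains_sum
# checks targets against the set of previously seen prefix sums.
prefix = PrefixSum([1, 2, 3, 4])
assert prefix.get_sum(0, 3) == 10  # whole array
assert prefix.get_sum(1, 2) == 5   # 2 + 3
assert prefix.contains_sum(7)      # the subarray [3, 4]
assert not prefix.contains_sum(11)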
355
"""Project Euler problem 12: the first triangle number with over 500 divisors."""


def count_divisors(n: int) -> int:
    """Count the divisors of n via its prime factorisation: for
    n = p1^a1 * ... * pk^ak, the divisor count is (a1 + 1) * ... * (ak + 1)."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        # A single prime factor larger than sqrt(n) remains, with exponent 1.
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Return the first triangle number having more than 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
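# Worked check for count_divisors above: 28 = 2^2 * 7 has
# (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28).
assert count_divisors(28) == 6
assert count_divisors(1) == 1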
9
0
'''simple docstring''' import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer _UpperCAmelCase : List[str] = logging.get_logger(__name__) _UpperCAmelCase : Tuple = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} _UpperCAmelCase : Optional[int] = { """vocab_file""": { """Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""", }, """merges_file""": { """Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""", }, """tokenizer_file""": { """Salesforce/codegen-350M-mono""": ( """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json""" ), }, } _UpperCAmelCase : List[str] = { """Salesforce/codegen-350M-mono""": 2_0_4_8, } class a__ ( __A ): """simple docstring""" __UpperCamelCase : Union[str, Any] = VOCAB_FILES_NAMES __UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Union[str, Any] = ['input_ids', 'attention_mask'] __UpperCamelCase : Optional[int] = CodeGenTokenizer def __init__(self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase="<|endoftext|>" , __lowercase="<|endoftext|>" , __lowercase="<|endoftext|>" , __lowercase=False , **__lowercase , ): super().__init__( __lowercase , __lowercase , tokenizer_file=__lowercase , unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , add_prefix_space=__lowercase , **__lowercase , ) if kwargs.pop('''add_bos_token''' , __lowercase ): __lowerCAmelCase = kwargs.pop('''name_or_path''' , '''''' ) raise ValueError( '''Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.''' '''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n''' F"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n""" F"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n""" '''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.''' ''' so that the fast tokenizer works correctly.''' ) __lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , __lowercase ) != add_prefix_space: __lowerCAmelCase = getattr(__lowercase , pre_tok_state.pop('''type''' ) ) __lowerCAmelCase = add_prefix_space __lowerCAmelCase = pre_tok_class(**__lowercase ) __lowerCAmelCase = add_prefix_space def _snake_case (self , *__lowercase , **__lowercase ): __lowerCAmelCase = kwargs.get('''is_split_into_words''' , __lowercase ) assert self.add_prefix_space or not is_split_into_words, ( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." 
) return super()._batch_encode_plus(*__lowercase , **__lowercase ) def _snake_case (self , *__lowercase , **__lowercase ): __lowerCAmelCase = kwargs.get('''is_split_into_words''' , __lowercase ) assert self.add_prefix_space or not is_split_into_words, ( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*__lowercase , **__lowercase ) def _snake_case (self , __lowercase , __lowercase = None ): __lowerCAmelCase = self._tokenizer.model.save(__lowercase , name=__lowercase ) return tuple(__lowercase ) def _snake_case (self , __lowercase , __lowercase = False , __lowercase = None , __lowercase = None , **__lowercase , ): __lowerCAmelCase = super().decode( token_ids=__lowercase , skip_special_tokens=__lowercase , clean_up_tokenization_spaces=__lowercase , **__lowercase , ) if truncate_before_pattern is not None and len(__lowercase ) > 0: __lowerCAmelCase = self.truncate(__lowercase , __lowercase ) return decoded_text def _snake_case (self , __lowercase , __lowercase ): def find_re(__lowercase , __lowercase , __lowercase ): __lowerCAmelCase = pattern.search(__lowercase , __lowercase ) return m.start() if m else -1 __lowerCAmelCase = [re.compile(__lowercase , re.MULTILINE ) for pattern in truncate_before_pattern] __lowerCAmelCase = list(re.finditer('''^print''' , __lowercase , re.MULTILINE ) ) if len(__lowercase ) > 1: __lowerCAmelCase = completion[: prints[1].start()] __lowerCAmelCase = list(re.finditer('''^def''' , __lowercase , re.MULTILINE ) ) if len(__lowercase ) > 1: __lowerCAmelCase = completion[: defs[1].start()] __lowerCAmelCase = 0 __lowerCAmelCase = [ pos for pos in [find_re(__lowercase , __lowercase , __lowercase ) for terminal in terminals] if pos != -1 ] if len(__lowercase ) > 0: return completion[: min(__lowercase )] else: return completion
356
'''simple docstring'''

import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class a__(unittest.TestCase):
    """simple docstring"""

    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_idx_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
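For context, a hedged sketch of how such a constraint is typically consumed by constrained beam search; the model, prompt, and forced words are placeholders, not part of the test above:

# Illustrative only: model and prompt are assumptions for a constrained-search demo.
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import DisjunctiveConstraint

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

# Accept either surface form of the forced word (" rain" vs " raining").
word_ids = tokenizer([" rain", " raining"], add_special_tokens=False).input_ids
constraint = DisjunctiveConstraint(word_ids)

inputs = tokenizer("The weather today is", return_tensors="pt")
# Constrained generation requires beam search (num_beams > 1).
out = model.generate(**inputs, constraints=[constraint], num_beams=4, max_new_tokens=20)
print(tokenizer.decode(out[0], skip_special_tokens=True))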
9
0
'''simple docstring'''

from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0

    operations = {'''+''', '''-''', '''*''', '''/'''}
    stack = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Integer division that truncates toward zero, like C-style "/".
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
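A quick worked example of the evaluator (function and parameter names as restored above):

# (3 + 4) * 5 in postfix notation:
print(evaluate_postfix(["3", "4", "+", "5", "*"]))  # 35
# Division truncates toward zero, so 7 / -2 evaluates to -3, not -4:
print(evaluate_postfix(["7", "-2", "/"]))  # -3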
357
'''simple docstring'''

from typing import Dict, Optional

import numpy as np

import datasets


_DESCRIPTION = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`List[ndarray]`):
        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a
        different size.
    references (`List[ndarray]`):
        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a
        different size.
    num_labels (`int`):
        Number of classes (categories).
    ignore_index (`int`):
        Index that will be ignored during evaluation.
    nan_to_num (`int`, *optional*):
        If specified, NaN values will be replaced by the number defined by the user.
    label_map (`dict`, *optional*):
        If specified, dictionary mapping old label indices to new label indices.
    reduce_labels (`bool`, *optional*, defaults to `False`):
        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is
        used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The
        background label will be replaced by 255.

Returns:
    `Dict[str, float | ndarray]` comprising various elements:
    - *mean_iou* (`float`): Mean Intersection-over-Union (IoU averaged over all categories).
    - *mean_accuracy* (`float`): Mean accuracy (averaged over all categories).
    - *overall_accuracy* (`float`): Overall accuracy on all images.
    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`): Per category accuracy.
    - *per_category_iou* (`ndarray` of shape `(num_labels,)`): Per category IoU.

Examples:

    >>> import numpy as np

    >>> mean_iou = datasets.load_metric(\"mean_iou\")

    >>> # suppose one has 3 different segmentation maps predicted
    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])

    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])

    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])

    >>> predicted = [predicted_1, predicted_2, predicted_3]
    >>> ground_truth = [actual_1, actual_2, actual_3]

    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
    >>> print(results)  # doctest: +NORMALIZE_WHITESPACE
    {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842,
    'per_category_iou': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]),
    'per_category_accuracy': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}
"""

_CITATION = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
    author = {{MMSegmentation Contributors}},
    license = {Apache-2.0},
    month = {7},
    title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
    url = {https://github.com/open-mmlab/mmsegmentation},
    year = {2020}
}"""


def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label


def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics['''mean_iou'''] = np.nanmean(iou)
    metrics['''mean_accuracy'''] = np.nanmean(acc)
    metrics['''overall_accuracy'''] = all_acc
    metrics['''per_category_iou'''] = iou
    metrics['''per_category_accuracy'''] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class a__(datasets.Metric):
    """simple docstring"""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    '''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
                    '''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
                }
            ),
            reference_urls=[
                '''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels,
        ignore_index,
        nan_to_num=None,
        label_map=None,
        reduce_labels=False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
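A small sanity check of intersect_and_union as defined above; the array values here are chosen purely for illustration:

import numpy as np

pred = np.array([[0, 1], [1, 1]])
gt = np.array([[0, 1], [0, 1]])
inter, union, _, _ = intersect_and_union(pred, gt, num_labels=2, ignore_index=255)
print(inter / union)  # per-class IoU: class 0 -> 0.5, class 1 -> ~0.667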
9
0
'''simple docstring'''


def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''')

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    # Pad both operands to the same width, then AND them digit by digit.
    return "0b" + "".join(
        str(int(char_a == '''1''' and char_b == '''1'''))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
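For example (function name as restored above):

print(binary_and(25, 32))  # 0b000000 -- no common set bits
print(binary_and(13, 11))  # 0b1001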
358
'''simple docstring'''

import json
import os
import unittest

from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class a__(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''',
            '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''',
            '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''[UNK]'''}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('''Hello''', '''World''')
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['''token_type_ids'''], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''')

        text = tokenizer.encode('''sequence builders''', add_special_tokens=False)
        text_2 = tokenizer.encode('''multi-sequence build''', add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            '''sequence builders''', add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            '''sequence builders''', '''multi-sequence build''', add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained('''microsoft/deberta-base''')

            sequences = [
                '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
                '''ALBERT incorporates two parameter reduction techniques''',
                '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
                ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
                ''' vocabulary embedding.''',
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding['''input_ids''']]

            # fmt: off
            expected_encoding = {
                '''input_ids''': [
                    [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
                ],
                '''token_type_ids''': [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                '''attention_mask''': [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on

            expected_decoded_sequences = [
                '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
                '''ALBERT incorporates two parameter reduction techniques''',
                '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
                ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
                ''' vocabulary embedding.''',
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
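As context for the pair-encoding assertions above, a hedged snippet using the same public checkpoint the slow tests load; the output is printed rather than asserted, since exact ids depend on the checkpoint:

from transformers import DebertaTokenizer

tok = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
enc = tok("Hello", "World")
# The second segment is marked with token_type_id 1, mirroring the unit test.
print(enc["token_type_ids"])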
9
0
'''simple docstring'''

from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_mra"""] = [
        """MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """MraForMaskedLM""",
        """MraForMultipleChoice""",
        """MraForQuestionAnswering""",
        """MraForSequenceClassification""",
        """MraForTokenClassification""",
        """MraLayer""",
        """MraModel""",
        """MraPreTrainedModel""",
    ]


if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    # Defer the heavy submodule imports until their attributes are first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
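A hedged illustration of what the lazy-module pattern buys; this assumes a transformers version that re-exports the MRA classes at the top level:

import transformers

# `import transformers` does not load modeling_mra; this attribute lookup does.
config = transformers.MraConfig()
print(config.hidden_size)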
359
'''simple docstring'''

import argparse
import datetime


def zeller(date_input: str) -> str:
    # Days of the week indexed by Zeller's result f (Sunday = 0).
    days = {
        '''0''': '''Sunday''',
        '''1''': '''Monday''',
        '''2''': '''Tuesday''',
        '''3''': '''Wednesday''',
        '''4''': '''Thursday''',
        '''5''': '''Friday''',
        '''6''': '''Saturday''',
    }

    # Maps datetime.weekday() (Monday = 0) onto Zeller's numbering above.
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError('''Must be 10 characters long''')

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError('''Month must be between 1 - 12''')

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError('''Date separator must be \'-\' or \'/\'''')

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError('''Date must be between 1 - 31''')

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError('''Date separator must be \'-\' or \'/\'''')

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            '''Year out of range. There has to be some sort of limit...right?''')

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12

    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('''The date was evaluated incorrectly. Contact developer.''')

    # Response
    response = F"""Your date {date_input}, is a {days[str(f)]}!"""
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            """Find out what day of the week nearly any date is or was. Enter """
            """date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
        )
    )
    parser.add_argument(
        """date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
    )
    args = parser.parse_args()
    zeller(args.date_input)
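A quick run of the restored function; the date is chosen for illustration, and the internal datetime cross-check guarantees the answer is consistent:

print(zeller("01-31-2010"))  # Your date 01-31-2010, is a Sunday!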
9
0