Dataset columns:
  code                     string  (lengths 87 - 55.2k)
  code_codestyle           int64   (0 - 349)
  style_context            string  (lengths 135 - 49.1k)
  style_context_codestyle  int64   (0 - 349)
  label                    int64   (0 - 1)
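A minimal sketch of loading a dataset with this schema through the `datasets` library; the repository id used here is a placeholder, not this dataset's actual name:

    from datasets import load_dataset

    # "org/code-style-pairs" is a hypothetical repo id; substitute the real one.
    ds = load_dataset("org/code-style-pairs", split="train")
    print(ds.features)     # code, code_codestyle, style_context, style_context_codestyle, label
    print(ds[0]["label"])  # 0 or 1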
code:
import json
import os
import unittest

from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # All sequences are padded to a common length of 45; the fully expanded
            # padding literals are expressed as list arithmetic for readability.
            expected_encoding = {
                "input_ids": [
                    [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2] + [0] * 25,
                    [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2] + [0] * 35,
                    [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2],
                ],
                "token_type_ids": [[0] * 45, [0] * 45, [0] * 45],
                "attention_mask": [[1] * 20 + [0] * 25, [1] * 10 + [0] * 35, [1] * 45],
            }

            expected_decoded_sequences = sequences

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
code_codestyle: 338
style_context:
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
style_context_codestyle: 338
label: 1
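In the rows shown here, label is 1 and code_codestyle equals style_context_codestyle (both 338), which is consistent with label marking whether the two snippets share a style class. A minimal sketch under that assumption; the rule is inferred from these rows, not documented:

    def same_style_label(code_codestyle: int, style_context_codestyle: int) -> int:
        # Assumed labeling rule: 1 when both snippets carry the same style id.
        return int(code_codestyle == style_context_codestyle)

    assert same_style_label(338, 338) == 1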
code:
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
code_codestyle: 338
style_context:
def decimal_to_binary(num: int) -> str:
    """
    Convert an integer to its binary string representation.

    >>> decimal_to_binary(0)
    '0b0'
    >>> decimal_to_binary(2)
    '0b10'
    >>> decimal_to_binary(7)
    '0b111'
    >>> decimal_to_binary(-5)
    '-0b101'
    """
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 338
label: 1
code:
import numpy as np


def runge_kutta(f, y0, x0, h, x_end):
    """Solve y' = f(x, y) with the classical fourth-order Runge-Kutta method."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
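A hedged usage sketch for the runge_kutta cell above, integrating y' = y from 0 to 1 (the exact solution is e^x, so the final value should approximate e):

    y = runge_kutta(lambda x, y: y, y0=1.0, x0=0.0, h=0.01, x_end=1.0)
    print(y[-1])  # ~2.71828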
code_codestyle: 338
style_context:
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # Sort by the chosen key (value, weight, or value/weight ratio), then take
    # items greedily while they still fit under the cost budget.
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
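A hedged usage sketch for the greedy-knapsack cell above; the menu items and budget are made up for illustration:

    names = ["Burger", "Pizza", "Cola"]
    values = [80, 100, 60]
    weights = [40, 60, 40]
    foods = build_menu(names, values, weights)
    # Greedy by value: take the highest-value items that still fit under max_cost.
    chosen, total_value = greedy(foods, 100, Things.get_value)
    print(chosen, total_value)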
style_context_codestyle: 338
label: 1
code:
import argparse
import ast
import logging
import os
import sys

import pandas as pd
import torch
from tqdm import tqdm

from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging


sys.path.append(os.path.join(os.getcwd()))  # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()


def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")


def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")


def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings


def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument("--index_path", default=None, type=str, help="Path to the retrieval index")
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file: "
            "qa - a single line in the following format: question [tab] answer_list; "
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with step number",
    )
    parser.add_argument("--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument("--num_beams", default=4, type=int, help="Number of beams to be used when generating answers")
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retrieved while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args


def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)


if __name__ == "__main__":
    args = get_args()
    main(args)
code_codestyle: 338
style_context:
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create the universe of discourse using linspace().
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining membership functions
    # (trapmf(), gbellmf(), gaussmf(), etc.).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using built-in functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement(A) = 1 - µA(x)
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), 1 - µB(x))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic sum = µA(x) + µB(x) - µA(x) * µB(x)
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic product = µA(x) * µB(x)
    alg_product = young * middle_aged
    # 7. Bounded sum = min(1, µA(x) + µB(x))
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max(0, µA(x) - µB(x))
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot set A, set B, and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plots = [
        ("Young", young), ("Middle aged", middle_aged), ("union", union),
        ("intersection", intersection), ("complement_a", complement_a),
        ("difference a/b", difference), ("alg_sum", alg_sum),
        ("alg_product", alg_product), ("bdd_sum", bdd_sum),
        ("bdd_difference", bdd_difference),
    ]
    plt.figure()
    for i, (title, data) in enumerate(plots, start=1):
        plt.subplot(4, 3, i)
        plt.plot(X, data)
        plt.title(title)
        plt.grid(True)
    plt.subplots_adjust(hspace=0.5)
    plt.show()
style_context_codestyle: 338
label: 1
code:
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]


if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 338
style_context:
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
style_context_codestyle: 338
label: 1
code:
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device


if is_torch_available():
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments


@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
code_codestyle: 338
import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer lowercase__ : str = logging.get_logger(__name__) class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : Any = """AutoTokenizer""" UpperCAmelCase_ : Optional[int] = ["""tokenizer"""] UpperCAmelCase_ : str = { """semantic_prompt""": 1, """coarse_prompt""": 2, """fine_prompt""": 2, } def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]: super().__init__(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = speaker_embeddings @classmethod def SCREAMING_SNAKE_CASE_ ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , **__SCREAMING_SNAKE_CASE ) ->Tuple: if speaker_embeddings_dict_path is not None: lowerCAmelCase = get_file_from_repo( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ) , revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ) , ) if speaker_embeddings_path is None: logger.warning( F"`{os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." 
) lowerCAmelCase = None else: with open(__SCREAMING_SNAKE_CASE ) as speaker_embeddings_json: lowerCAmelCase = json.load(__SCREAMING_SNAKE_CASE ) else: lowerCAmelCase = None lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) return cls(tokenizer=__SCREAMING_SNAKE_CASE , speaker_embeddings=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , __SCREAMING_SNAKE_CASE="speaker_embeddings" , __SCREAMING_SNAKE_CASE = False , **__SCREAMING_SNAKE_CASE , ) ->int: if self.speaker_embeddings is not None: os.makedirs(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''v2''' ) , exist_ok=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = {} lowerCAmelCase = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict['''repo_or_path'''] , __SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=__SCREAMING_SNAKE_CASE , ) lowerCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}.npy" ) lowerCAmelCase = tmp_dict with open(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , '''w''' ) as fp: json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) super().save_pretrained(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE ) ->List[str]: lowerCAmelCase = self.speaker_embeddings[voice_preset] lowerCAmelCase = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." ) lowerCAmelCase = get_file_from_repo( self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ) , revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ) , ) if path is None: raise ValueError( F"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." ) lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE ) return voice_preset_dict def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None ) ->Tuple: for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F"Voice preset unrecognized, missing {key} as a key." ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." 
) def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="pt" , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ) ->int: if voice_preset is not None and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): if ( isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE ) else: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not voice_preset.endswith('''.npz''' ): lowerCAmelCase = voice_preset + '''.npz''' lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE ) if voice_preset is not None: self._validate_voice_preset_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) lowerCAmelCase = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.tokenizer( __SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) if voice_preset is not None: lowerCAmelCase = voice_preset return encoded_text
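A minimal usage sketch for the processor above. It mirrors the Bark processor API in transformers; the checkpoint name and voice-preset key are assumptions standing in for whatever is actually available:

# Hypothetical end-to-end call (checkpoint/preset names assumed): tokenize a
# prompt and attach a stored voice preset to the returned BatchFeature.
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("suno/bark-small")
inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
print(inputs["input_ids"].shape)        # padded to the default max_length of 256
print(inputs["history_prompt"].keys())  # voice preset arrays ("history_prompt" is the key transformers' Bark uses)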
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer lowercase__ : Union[str, Any] = logging.get_logger(__name__) lowercase__ : Any = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowercase__ : Union[str, Any] = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowercase__ : List[Any] = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowercase__ : Optional[int] = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } lowercase__ : Any = { '''facebook/dpr-ctx_encoder-single-nq-base''': 5_1_2, '''facebook/dpr-ctx_encoder-multiset-base''': 5_1_2, } lowercase__ : Union[str, Any] = { '''facebook/dpr-question_encoder-single-nq-base''': 5_1_2, '''facebook/dpr-question_encoder-multiset-base''': 5_1_2, } lowercase__ : Tuple = { '''facebook/dpr-reader-single-nq-base''': 5_1_2, '''facebook/dpr-reader-multiset-base''': 5_1_2, } lowercase__ : str = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } lowercase__ : List[str] = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } lowercase__ : Optional[int] = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : 
Optional[int] = VOCAB_FILES_NAMES UpperCAmelCase_ : Union[str, Any] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ : Tuple = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ : Any = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase_ : int = DPRContextEncoderTokenizer class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : Any = VOCAB_FILES_NAMES UpperCAmelCase_ : List[str] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ : Optional[int] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ : str = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase_ : Optional[Any] = DPRQuestionEncoderTokenizer lowercase__ : int = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) lowercase__ : str = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) lowercase__ : str = R''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(UpperCamelCase_ ) class lowercase_ : """simple docstring""" def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) ->BatchEncoding: if titles is None and texts is None: return super().__call__( __SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) elif titles is None or texts is None: lowerCAmelCase = titles if texts is None else texts return super().__call__( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) lowerCAmelCase = titles if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else [titles] lowerCAmelCase = texts if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else [texts] lowerCAmelCase = len(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = questions if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else [questions] * n_passages assert len(__SCREAMING_SNAKE_CASE ) == len( __SCREAMING_SNAKE_CASE ), F"There should be as many titles than texts but got {len(__SCREAMING_SNAKE_CASE )} titles and {len(__SCREAMING_SNAKE_CASE )} texts." 
lowerCAmelCase = super().__call__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE )['''input_ids'''] lowerCAmelCase = super().__call__(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE )['''input_ids'''] lowerCAmelCase = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ] } if return_attention_mask is not False: lowerCAmelCase = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) lowerCAmelCase = attention_mask return self.pad(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 16 , __SCREAMING_SNAKE_CASE = 64 , __SCREAMING_SNAKE_CASE = 4 , ) ->List[DPRSpanPrediction]: lowerCAmelCase = reader_input['''input_ids'''] lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = reader_output[:3] lowerCAmelCase = len(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = sorted(range(__SCREAMING_SNAKE_CASE ) , reverse=__SCREAMING_SNAKE_CASE , key=relevance_logits.__getitem__ ) lowerCAmelCase = [] for doc_id in sorted_docs: lowerCAmelCase = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence lowerCAmelCase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: lowerCAmelCase = sequence_ids.index(self.pad_token_id ) else: lowerCAmelCase = len(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__SCREAMING_SNAKE_CASE , top_spans=__SCREAMING_SNAKE_CASE , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__SCREAMING_SNAKE_CASE , start_index=__SCREAMING_SNAKE_CASE , end_index=__SCREAMING_SNAKE_CASE , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(__SCREAMING_SNAKE_CASE ) >= num_spans: break return nbest_spans_predictions[:num_spans] def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) ->List[DPRSpanPrediction]: lowerCAmelCase = [] for start_index, start_score in enumerate(__SCREAMING_SNAKE_CASE ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) lowerCAmelCase = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : x[1] , reverse=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = [] for (start_index, end_index), score in scores: assert start_index <= end_index, F"Wrong span indices: [{start_index}:{end_index}]" lowerCAmelCase = end_index - start_index + 1 assert length <= max_answer_length, F"Span is too long: {length} > {max_answer_length}" if any( 
start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(__SCREAMING_SNAKE_CASE ) == top_spans: break return chosen_span_intervals @add_end_docstrings(UpperCamelCase_ ) class lowercase_ ( UpperCamelCase_ , UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : Any = VOCAB_FILES_NAMES UpperCAmelCase_ : int = READER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ : Optional[Any] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ : int = READER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase_ : List[Any] = ["""input_ids""", """attention_mask"""] UpperCAmelCase_ : Optional[int] = DPRReaderTokenizer
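The reader tokenizer above is designed to feed `DPRReader` and then decode its logits back into answer spans; a minimal sketch using the single-nq checkpoint referenced in the maps above (availability assumed):

from transformers import DPRReader, DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded_inputs = tokenizer(
    questions="What is love?",
    titles="Haddaway",
    texts="'What Is Love' is a song recorded by the artist Haddaway",
    return_tensors="pt",
)
outputs = model(**encoded_inputs)
# maps start/end/relevance logits back to text spans via the tokenizer
predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
print(predicted_spans[0].text)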
import warnings

from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline  # noqa F401


warnings.warn(
    "The `inpainting.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionInpaintPipeline` instead."
)
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
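Migrating off the deprecated shim above is a rename, since the class only forwards to the image processor; a short sketch (checkpoint name assumed):

from transformers import LayoutLMv2ImageProcessor

# preferred replacement for LayoutLMv2FeatureExtractor.from_pretrained(...)
image_processor = LayoutLMv2ImageProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")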
import os import re import shutil import sys import tempfile import unittest import black lowercase__ : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated. lowercase__ : Dict = ''' def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states ''' class lowercase_ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: lowerCAmelCase = tempfile.mkdtemp() os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) ) lowerCAmelCase = self.transformer_dir shutil.copy( os.path.join(__SCREAMING_SNAKE_CASE , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: lowerCAmelCase = '''src/transformers''' shutil.rmtree(self.transformer_dir ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Union[str, Any]: lowerCAmelCase = comment + F"\nclass {class_name}(nn.Module):\n" + class_code if overwrite_result is not None: lowerCAmelCase = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result lowerCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) lowerCAmelCase = black.format_str(__SCREAMING_SNAKE_CASE , mode=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = os.path.join(self.transformer_dir , '''new_code.py''' ) with open(__SCREAMING_SNAKE_CASE , '''w''' , newline='''\n''' ) as f: f.write(__SCREAMING_SNAKE_CASE ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(__SCREAMING_SNAKE_CASE ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=__SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f: self.assertTrue(f.read() , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->int: lowerCAmelCase = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: # Base copy consistency self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , __SCREAMING_SNAKE_CASE , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , 
'''TestModel''' , __SCREAMING_SNAKE_CASE ) , ) # Copy consistency with a really long name lowerCAmelCase = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( F"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , F"{long_class_name}LMPredictionHead" , re.sub('''Bert''' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , __SCREAMING_SNAKE_CASE , overwrite_result=re.sub('''Bert''' , '''TestModel''' , __SCREAMING_SNAKE_CASE ) , ) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: lowerCAmelCase = check_copies.LOCALIZED_READMES['''README_zh-hans.md'''] lowerCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the''' ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for''' ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong''' ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.''' ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),''' ''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and''' ''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same''' ''' method has been applied to compress GPT2 into''' ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into''' ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),''' ''' Multilingual BERT into''' ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German''' ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**''' ''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders''' ''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang''' ''' Luong, Quoc V. Le, Christopher D. Manning.''' ) lowerCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) lowerCAmelCase = ( '''1. 
**[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.''' ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文''' ''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and''' ''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same''' ''' method has been applied to compress GPT2 into''' ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into''' ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),''' ''' Multilingual BERT into''' ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German''' ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自''' ''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather''' ''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,''' ''' Christopher D. Manning 发布。\n''' ) lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] ) self.assertFalse(__SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] ) # Check whether the number of models is equal to README.md after conversion. self.assertTrue(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the''' ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for''' ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong''' ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.''' ) lowerCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and''' ''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) lowerCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] ) # Check if the model link is synchronized. self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser lowercase__ : Optional[int] = re.compile(R'''\s+''') def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Any: return {"hash": hashlib.mda(re.sub(snake_case__ , '''''' , example['''content'''] ).encode('''utf-8''' ) ).hexdigest()} def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Any: lowerCAmelCase = [len(snake_case__ ) for line in example['''content'''].splitlines()] return {"line_mean": np.mean(snake_case__ ), "line_max": max(snake_case__ )} def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Dict: lowerCAmelCase = np.mean([c.isalnum() for c in example['''content''']] ) return {"alpha_frac": alpha_frac} def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> List[Any]: if example["hash"] in uniques: uniques.remove(example['''hash'''] ) return True else: return False def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__=5 ) -> List[Any]: lowerCAmelCase = ['''auto-generated''', '''autogenerated''', '''automatically generated'''] lowerCAmelCase = example['''content'''].splitlines() for _, line in zip(range(snake_case__ ) , snake_case__ ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__=5 , snake_case__=0.05 ) -> List[str]: lowerCAmelCase = ['''unit tests''', '''test file''', '''configuration file'''] lowerCAmelCase = example['''content'''].splitlines() lowerCAmelCase = 0 lowerCAmelCase = 0 # first test for _, line in zip(range(snake_case__ ) , snake_case__ ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test lowerCAmelCase = example['''content'''].count('''\n''' ) lowerCAmelCase = int(coeff * nlines ) for line in lines: count_config += line.lower().count('''config''' ) count_test += line.lower().count('''test''' ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> List[Any]: lowerCAmelCase = ['''def ''', '''class ''', '''for ''', '''while '''] lowerCAmelCase = example['''content'''].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__=4 ) -> Union[str, Any]: lowerCAmelCase = example['''content'''].splitlines() lowerCAmelCase = 0 for line in lines: counter += line.lower().count('''=''' ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Tuple: lowerCAmelCase = tokenizer(example['''content'''] , truncation=snake_case__ )['''input_ids'''] lowerCAmelCase = len(example['''content'''] ) / len(snake_case__ ) return {"ratio": ratio} def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> List[str]: lowerCAmelCase = {} results.update(get_hash(snake_case__ ) ) results.update(line_stats(snake_case__ ) ) results.update(alpha_stats(snake_case__ ) ) results.update(char_token_ratio(snake_case__ ) ) results.update(is_autogenerated(snake_case__ ) ) results.update(is_config_or_test(snake_case__ ) ) results.update(has_no_keywords(snake_case__ ) ) 
results.update(has_few_assignments(snake_case__ ) ) return results def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> List[Any]: if not check_uniques(snake_case__ , snake_case__ ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> List[str]: with open(snake_case__ , '''rb''' ) as f_in: with gzip.open(str(snake_case__ ) + '''.gz''' , '''wb''' , compresslevel=6 ) as f_out: shutil.copyfileobj(snake_case__ , snake_case__ ) os.unlink(snake_case__ ) # Settings lowercase__ : Union[str, Any] = HfArgumentParser(PreprocessingArguments) lowercase__ : str = parser.parse_args() if args.num_workers is None: lowercase__ : Union[str, Any] = multiprocessing.cpu_count() lowercase__ : Union[str, Any] = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset lowercase__ : List[Any] = time.time() lowercase__ : List[str] = load_dataset(args.dataset_name, split='''train''') print(f'Time to load dataset: {time.time()-t_start:.2f}') # Run preprocessing lowercase__ : Dict = time.time() lowercase__ : List[Any] = ds.map(preprocess, num_proc=args.num_workers) print(f'Time to preprocess dataset: {time.time()-t_start:.2f}') # Deduplicate hashes lowercase__ : List[Any] = set(ds.unique('''hash''')) lowercase__ : Any = len(uniques) / len(ds) print(f'Fraction of duplicates: {1-frac:.2%}') # Deduplicate data and apply heuristics lowercase__ : List[str] = time.time() lowercase__ : int = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args}) print(f'Time to filter dataset: {time.time()-t_start:.2f}') print(f'Size of filtered dataset: {len(ds_filter)}') # Deduplicate with minhash and jaccard similarity if args.near_deduplication: lowercase__ : Dict = time.time() lowercase__ , lowercase__ : List[str] = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(f'Time to deduplicate dataset: {time.time()-t_start:.2f}') print(f'Size of deduplicate dataset: {len(ds_filter)}') # Save data in batches of samples_per_file lowercase__ : Optional[int] = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / '''duplicate_clusters.json''', '''w''') as f: json.dump(duplicate_clusters, f) lowercase__ : Any = output_dir / '''data''' data_dir.mkdir(exist_ok=True) lowercase__ : Union[str, Any] = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): lowercase__ : List[str] = str(data_dir / f'file-{file_number+1:012}.json') lowercase__ : List[str] = min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(f'Time to save dataset: {time.time()-t_start:.2f}')
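A small self-contained check of the whitespace-insensitive hashing idea the pipeline above deduplicates on (md5 assumed as the digest, matching the mangled `hashlib.mda` call): two files that differ only in whitespace hash identically, so only one survives `check_uniques`.

import hashlib
import re

PATTERN = re.compile(r"\s+")

def content_hash(example):
    # strip all whitespace before hashing, as in get_hash above
    return hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()

assert content_hash({"content": "def f():\n    return 1\n"}) == content_hash({"content": "def f():\treturn 1"})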
import unittest

import numpy as np


def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray = None,
) -> np.ndarray:
    """Computes the Schur complement S = C - B.T @ A^-1 @ B of the symmetric
    block matrix [[A, B], [B.T, C]]. A precomputed (pseudo-)inverse of A can
    be passed via `pseudo_inv` to skip the inversion."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        # det([[A, B], [B.T, C]]) = det(A) * det(S)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        # swapping A and B makes the shapes inconsistent, so this must raise
        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        # B has 2 columns but C has 3, so this must raise
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
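A short usage sketch for the `pseudo_inv` argument: precompute A^-1 once and reuse it across several Schur complements against the same A. The numbers are chosen so the result is checkable by hand:

a = np.array([[1.0, 2.0], [2.0, 5.0]])  # det(A) = 1
a_inv = np.linalg.inv(a)                # [[5, -2], [-2, 1]]
b = np.array([[1.0], [0.0]])
c = np.array([[3.0]])
s = schur_complement(a, b, c, pseudo_inv=a_inv)
print(s)  # [[-2.]] since B.T @ A^-1 @ B = 5 and C - 5 = -2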
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowercase_ : """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=[8, 16, 32, 64] , __SCREAMING_SNAKE_CASE=[1, 1, 2, 1] , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=["stage2", "stage3", "stage4"] , __SCREAMING_SNAKE_CASE=[2, 3, 4] , __SCREAMING_SNAKE_CASE=1 , ) ->Union[str, Any]: lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = image_size lowerCAmelCase = num_channels lowerCAmelCase = embeddings_size lowerCAmelCase = hidden_sizes lowerCAmelCase = depths lowerCAmelCase = is_training lowerCAmelCase = use_labels lowerCAmelCase = hidden_act lowerCAmelCase = num_labels lowerCAmelCase = scope lowerCAmelCase = len(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = out_features lowerCAmelCase = out_indices lowerCAmelCase = num_groups def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) lowerCAmelCase = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Optional[Any]: lowerCAmelCase = BitModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() lowerCAmelCase = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Dict: lowerCAmelCase = self.num_labels lowerCAmelCase = BitForImageClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->List[str]: lowerCAmelCase = BitBackbone(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) 
model.eval() lowerCAmelCase = model(__SCREAMING_SNAKE_CASE ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowerCAmelCase = None lowerCAmelCase = BitBackbone(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() lowerCAmelCase = model(__SCREAMING_SNAKE_CASE ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: lowerCAmelCase = self.prepare_config_and_inputs() lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs lowerCAmelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowercase_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () UpperCAmelCase_ : Union[str, Any] = ( {"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification} if is_torch_available() else {} ) UpperCAmelCase_ : str = False UpperCAmelCase_ : Dict = False UpperCAmelCase_ : Dict = False UpperCAmelCase_ : Optional[Any] = False UpperCAmelCase_ : Optional[Any] = False def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: lowerCAmelCase = BitModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE_ ( self ) ->Any: return @unittest.skip(reason='''Bit does not output attentions''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: pass @unittest.skip(reason='''Bit does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: pass @unittest.skip(reason='''Bit does not support input and output embeddings''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: pass def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase = [*signature.parameters.keys()] lowerCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Any: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase = model_class(config=__SCREAMING_SNAKE_CASE ) for name, module in model.named_modules(): if isinstance(__SCREAMING_SNAKE_CASE , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: def check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): lowerCAmelCase = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCAmelCase = self.model_tester.num_stages self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = ['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: lowerCAmelCase = layer_type lowerCAmelCase = True check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase = True check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @unittest.skip(reason='''Bit does not use feedforward chunking''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: pass def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE ) @slow def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase = BitModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( ) -> Tuple: lowerCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class lowercase_ ( unittest.TestCase ): """simple docstring""" @cached_property def SCREAMING_SNAKE_CASE_ ( self ) ->int: return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: lowerCAmelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.default_image_processor lowerCAmelCase = prepare_img() lowerCAmelCase = 
image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): lowerCAmelCase = model(**__SCREAMING_SNAKE_CASE ) # verify the logits lowerCAmelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @require_torch class lowercase_ ( UpperCamelCase_ , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : int = (BitBackbone,) if is_torch_available() else () UpperCAmelCase_ : Optional[int] = BitConfig UpperCAmelCase_ : List[Any] = False def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: lowerCAmelCase = BitModelTester(self )
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration lowercase__ : Any = { '''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''', '''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''', '''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''', '''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''', '''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''', '''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''', '''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''', '''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''', '''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''', '''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''', } def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str: lowerCAmelCase = ['''layers''', '''blocks'''] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) lowercase__ : List[Any] = { '''blocks''': '''layers''', '''mlp.0''': '''fc1''', '''mlp.2''': '''fc2''', '''mlp_ln''': '''final_layer_norm''', '''.attn.query''': '''.self_attn.q_proj''', '''.attn.key''': '''.self_attn.k_proj''', '''.attn.value''': '''.self_attn.v_proj''', '''.attn_ln''': '''.self_attn_layer_norm''', '''.attn.out''': '''.self_attn.out_proj''', '''.cross_attn.query''': '''.encoder_attn.q_proj''', '''.cross_attn.key''': '''.encoder_attn.k_proj''', '''.cross_attn.value''': '''.encoder_attn.v_proj''', '''.cross_attn_ln''': '''.encoder_attn_layer_norm''', '''.cross_attn.out''': '''.encoder_attn.out_proj''', '''decoder.ln.''': '''decoder.layer_norm.''', '''encoder.ln.''': '''encoder.layer_norm.''', '''token_embedding''': '''embed_tokens''', '''encoder.positional_embedding''': '''encoder.embed_positions.weight''', '''decoder.positional_embedding''': '''decoder.embed_positions.weight''', '''ln_post''': '''layer_norm''', } def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]: lowerCAmelCase = list(s_dict.keys() ) for key in keys: lowerCAmelCase = key for k, v in WHISPER_MAPPING.items(): if k in key: lowerCAmelCase = new_key.replace(snake_case__ , snake_case__ ) print(f"{key} -> {new_key}" ) lowerCAmelCase = s_dict.pop(snake_case__ ) return s_dict def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]: lowerCAmelCase , lowerCAmelCase = emb.weight.shape lowerCAmelCase = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) lowerCAmelCase = emb.weight.data return lin_layer def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> bytes: os.makedirs(snake_case__ , exist_ok=snake_case__ ) 
lowerCAmelCase = os.path.basename(snake_case__ ) lowerCAmelCase = url.split('''/''' )[-2] lowerCAmelCase = os.path.join(snake_case__ , snake_case__ ) if os.path.exists(snake_case__ ) and not os.path.isfile(snake_case__ ): raise RuntimeError(f"{download_target} exists and is not a regular file" ) if os.path.isfile(snake_case__ ): lowerCAmelCase = open(snake_case__ , '''rb''' ).read() if hashlib.shaaaa(snake_case__ ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" ) with urllib.request.urlopen(snake_case__ ) as source, open(snake_case__ , '''wb''' ) as output: with tqdm( total=int(source.info().get('''Content-Length''' ) ) , ncols=8_0 , unit='''iB''' , unit_scale=snake_case__ , unit_divisor=1_0_2_4 ) as loop: while True: lowerCAmelCase = source.read(8_1_9_2 ) if not buffer: break output.write(snake_case__ ) loop.update(len(snake_case__ ) ) lowerCAmelCase = open(snake_case__ , '''rb''' ).read() if hashlib.shaaaa(snake_case__ ).hexdigest() != expected_shaaaa: raise RuntimeError( '''Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.''' ) return model_bytes def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> str: if ".pt" not in checkpoint_path: lowerCAmelCase = _download(_MODELS[checkpoint_path] ) else: lowerCAmelCase = torch.load(snake_case__ , map_location='''cpu''' ) lowerCAmelCase = original_checkpoint['''dims'''] lowerCAmelCase = original_checkpoint['''model_state_dict'''] lowerCAmelCase = state_dict['''decoder.token_embedding.weight'''] remove_ignore_keys_(snake_case__ ) rename_keys(snake_case__ ) lowerCAmelCase = True lowerCAmelCase = state_dict['''decoder.layers.0.fc1.weight'''].shape[0] lowerCAmelCase = WhisperConfig( vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=snake_case__ , decoder_ffn_dim=snake_case__ , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_state'''] , max_source_positions=dimensions['''n_audio_ctx'''] , ) lowerCAmelCase = WhisperForConditionalGeneration(snake_case__ ) lowerCAmelCase , lowerCAmelCase = model.model.load_state_dict(snake_case__ , strict=snake_case__ ) if len(snake_case__ ) > 0 and not set(snake_case__ ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,''' f" but all the following weights are missing {missing}" ) if tie_embeds: lowerCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens ) else: lowerCAmelCase = proj_out_weights model.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase__ : List[str] = argparse.ArgumentParser() # # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') lowercase__ : int = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
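The converter can also be driven programmatically rather than through argparse; a minimal sketch (the import path is hypothetical, while "tiny.en" is one of the `_MODELS` keys above, so the checkpoint is downloaded and checksum-verified before conversion):

# hypothetical module name for the conversion script above
from convert_openai_whisper import convert_openai_whisper_to_tfms

convert_openai_whisper_to_tfms("tiny.en", "./whisper-tiny.en-hf")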
import os import string import sys lowercase__ : Any = 1 << 8 lowercase__ : List[Any] = { '''tab''': ord('''\t'''), '''newline''': ord('''\r'''), '''esc''': 2_7, '''up''': 6_5 + ARROW_KEY_FLAG, '''down''': 6_6 + ARROW_KEY_FLAG, '''right''': 6_7 + ARROW_KEY_FLAG, '''left''': 6_8 + ARROW_KEY_FLAG, '''mod_int''': 9_1, '''undefined''': sys.maxsize, '''interrupt''': 3, '''insert''': 5_0, '''delete''': 5_1, '''pg_up''': 5_3, '''pg_down''': 5_4, } lowercase__ : Union[str, Any] = KEYMAP['''up'''] lowercase__ : int = KEYMAP['''left'''] if sys.platform == "win32": lowercase__ : Union[str, Any] = [] lowercase__ : Any = { B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG, B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG, B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG, B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG, B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG, B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG, B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG, B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG, } for i in range(1_0): lowercase__ : Tuple = ord(str(i)) def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]: if os.name == "nt": import msvcrt lowerCAmelCase = '''mbcs''' # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(snake_case__ ) == 0: # Read the keystroke lowerCAmelCase = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): lowerCAmelCase = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: lowerCAmelCase = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int'''] ) ) WIN_CH_BUFFER.append(snake_case__ ) if ord(snake_case__ ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(1_2_6 ) ) lowerCAmelCase = chr(KEYMAP['''esc'''] ) except KeyError: lowerCAmelCase = cha[1] else: lowerCAmelCase = ch.decode(snake_case__ ) else: lowerCAmelCase = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": import termios import tty lowerCAmelCase = sys.stdin.fileno() lowerCAmelCase = termios.tcgetattr(snake_case__ ) try: tty.setraw(snake_case__ ) lowerCAmelCase = sys.stdin.read(1 ) finally: termios.tcsetattr(snake_case__ , termios.TCSADRAIN , snake_case__ ) return ch def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]: lowerCAmelCase = get_raw_chars() if ord(snake_case__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(snake_case__ ) == KEYMAP["esc"]: lowerCAmelCase = get_raw_chars() if ord(snake_case__ ) == KEYMAP["mod_int"]: lowerCAmelCase = get_raw_chars() if ord(snake_case__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(snake_case__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(snake_case__ ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
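A minimal interactive sketch built on the readers above (run in a real TTY, not a notebook). The outer reader is assumed to be named `get_character`, matching the accelerate keymap utility this file mirrors; arrow keys come back as chr(code + ARROW_KEY_FLAG), i.e. exactly chr(KEYMAP["up"]) and friends:

while True:
    char = get_character()  # outer reader; name assumed, see note above
    if char in (chr(KEYMAP["interrupt"]), chr(KEYMAP["newline"])):
        break  # Ctrl+C or Enter ends the loop
    if char == chr(KEYMAP["up"]):
        print("up arrow")
    elif char == chr(KEYMAP["down"]):
        print("down arrow")
    elif char in string.printable:
        print(f"key: {char!r}")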
338
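# The ARROW_KEY_FLAG trick above shifts arrow-key codes out of the single-byte
# range so they cannot collide with printable characters; the raw terminal code
# is recovered by subtracting the flag again. A tiny demonstration:
ARROW_KEY_FLAG = 1 << 8
up_code = 65 + ARROW_KEY_FLAG  # same value as KEYMAP['''up''']
assert up_code - ARROW_KEY_FLAG == ord("A")  # "\x1b[A" is the VT100 up-arrow byte
assert chr(ord("A") + ARROW_KEY_FLAG) == chr(up_code)  # how get_character() re-tags it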
from ...processing_utils import ProcessorMixin class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = ["""image_processor""", """feature_extractor"""] UpperCAmelCase_ : Optional[int] = """TvltImageProcessor""" UpperCAmelCase_ : Optional[int] = """TvltFeatureExtractor""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Optional[int]: super().__init__(image_processor=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = image_processor lowerCAmelCase = feature_extractor def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) ->List[Any]: if images is None and audio is None: raise ValueError('''You need to specify either an `images` or `audio` input to process.''' ) lowerCAmelCase = None if images is not None: lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , mask_pixel=__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if images_mixed is not None: lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , is_mixed=__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if audio is not None: lowerCAmelCase = self.feature_extractor( __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , mask_audio=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) lowerCAmelCase = {} if audio is not None: output_dict.update(__SCREAMING_SNAKE_CASE ) if images is not None: output_dict.update(__SCREAMING_SNAKE_CASE ) if images_mixed_dict is not None: output_dict.update(__SCREAMING_SNAKE_CASE ) return output_dict @property def SCREAMING_SNAKE_CASE_ ( self ) ->Any: lowerCAmelCase = self.image_processor.model_input_names lowerCAmelCase = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
338
1
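# Usage sketch for the TVLT processor above: it forwards video frames to the
# image processor and waveforms to the feature extractor, then merges the two
# output dicts. Shapes, the sampling rate, and the checkpoint id are
# illustrative assumptions, not values required by the class.
import numpy as np
from transformers import TvltProcessor

processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
video = [np.random.rand(8, 3, 224, 224)]  # one clip of 8 RGB frames
audio = [np.random.rand(10_000)]          # one mono waveform
inputs = processor(images=video, audio=audio, sampling_rate=44_100, return_tensors="pt")
print(sorted(inputs.keys()))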
import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class lowercase_ ( UpperCamelCase_ ): """simple docstring""" @require_torch def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched lowerCAmelCase = ''' from transformers import BertConfig, BertModel, BertTokenizer, pipeline ''' lowerCAmelCase = ''' mname = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task="fill-mask", model=mname) print("success") ''' lowerCAmelCase = ''' import socket def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet") socket.socket = offline_socket ''' # Force fetching the files so that we can use the cache lowerCAmelCase = '''hf-internal-testing/tiny-random-bert''' BertConfig.from_pretrained(__SCREAMING_SNAKE_CASE ) BertModel.from_pretrained(__SCREAMING_SNAKE_CASE ) BertTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE ) pipeline(task='''fill-mask''' , model=__SCREAMING_SNAKE_CASE ) # baseline - just load from_pretrained with normal network lowerCAmelCase = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )] # should succeed lowerCAmelCase = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files lowerCAmelCase = '''1''' lowerCAmelCase = subprocess.run(__SCREAMING_SNAKE_CASE , env=__SCREAMING_SNAKE_CASE , check=__SCREAMING_SNAKE_CASE , capture_output=__SCREAMING_SNAKE_CASE ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) @require_torch def SCREAMING_SNAKE_CASE_ ( self ) ->int: # python one-liner segments # this must be loaded before socket.socket is monkey-patched lowerCAmelCase = ''' from transformers import BertConfig, BertModel, BertTokenizer, pipeline ''' lowerCAmelCase = ''' mname = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task="fill-mask", model=mname) print("success") ''' lowerCAmelCase = ''' import socket def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet") socket.socket = offline_socket ''' # Force fetching the files so that we can use the cache lowerCAmelCase = '''hf-internal-testing/tiny-random-bert''' BertConfig.from_pretrained(__SCREAMING_SNAKE_CASE ) BertModel.from_pretrained(__SCREAMING_SNAKE_CASE ) BertTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE ) pipeline(task='''fill-mask''' , model=__SCREAMING_SNAKE_CASE ) # baseline - just load from_pretrained with normal network lowerCAmelCase = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )] # should succeed lowerCAmelCase = self.get_env() lowerCAmelCase = subprocess.run(__SCREAMING_SNAKE_CASE , env=__SCREAMING_SNAKE_CASE , check=__SCREAMING_SNAKE_CASE , capture_output=__SCREAMING_SNAKE_CASE ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) @require_torch def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be 
changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched lowerCAmelCase = ''' from transformers import BertConfig, BertModel, BertTokenizer ''' lowerCAmelCase = ''' mname = "hf-internal-testing/tiny-random-bert-sharded" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) print("success") ''' lowerCAmelCase = ''' import socket def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled") socket.socket = offline_socket ''' # baseline - just load from_pretrained with normal network lowerCAmelCase = [sys.executable, '''-c''', '''\n'''.join([load, run] )] # should succeed lowerCAmelCase = self.get_env() lowerCAmelCase = subprocess.run(__SCREAMING_SNAKE_CASE , env=__SCREAMING_SNAKE_CASE , check=__SCREAMING_SNAKE_CASE , capture_output=__SCREAMING_SNAKE_CASE ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) # next emulate no network lowerCAmelCase = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files lowerCAmelCase = '''1''' lowerCAmelCase = subprocess.run(__SCREAMING_SNAKE_CASE , env=__SCREAMING_SNAKE_CASE , check=__SCREAMING_SNAKE_CASE , capture_output=__SCREAMING_SNAKE_CASE ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) @require_torch def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: lowerCAmelCase = ''' from transformers import pipeline ''' lowerCAmelCase = ''' mname = "hf-internal-testing/tiny-random-bert" pipe = pipeline(model=mname) ''' lowerCAmelCase = ''' import socket def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled") socket.socket = offline_socket ''' lowerCAmelCase = self.get_env() lowerCAmelCase = '''1''' lowerCAmelCase = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )] lowerCAmelCase = subprocess.run(__SCREAMING_SNAKE_CASE , env=__SCREAMING_SNAKE_CASE , check=__SCREAMING_SNAKE_CASE , capture_output=__SCREAMING_SNAKE_CASE ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( '''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , ) @require_torch def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: lowerCAmelCase = ''' from transformers import AutoModel ''' lowerCAmelCase = ''' mname = "hf-internal-testing/test_dynamic_model" AutoModel.from_pretrained(mname, trust_remote_code=True) print("success") ''' # baseline - just load from_pretrained with normal network lowerCAmelCase = [sys.executable, '''-c''', '''\n'''.join([load, run] )] # should succeed lowerCAmelCase = self.get_env() lowerCAmelCase = subprocess.run(__SCREAMING_SNAKE_CASE , env=__SCREAMING_SNAKE_CASE , check=__SCREAMING_SNAKE_CASE , capture_output=__SCREAMING_SNAKE_CASE ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files lowerCAmelCase = '''1''' lowerCAmelCase = 
subprocess.run(__SCREAMING_SNAKE_CASE , env=__SCREAMING_SNAKE_CASE , check=__SCREAMING_SNAKE_CASE , capture_output=__SCREAMING_SNAKE_CASE ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() )
338
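# All of the offline tests above share one skeleton: assemble a tiny program
# from `load`/`run`/`mock` segments, then execute it in a fresh interpreter so
# TRANSFORMERS_OFFLINE is read at import time. A standalone sketch (it assumes
# the tiny model is already in the local cache, as the tests arrange):
import os
import subprocess
import sys

program = "\n".join(
    [
        "import socket",
        "def offline_socket(*args, **kwargs): raise RuntimeError('offline')",
        "socket.socket = offline_socket",
        "from transformers import BertConfig",
        "BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')",
        "print('success')",
    ]
)
env = dict(os.environ, TRANSFORMERS_OFFLINE="1")
result = subprocess.run([sys.executable, "-c", program], env=env, capture_output=True)
print(result.returncode, result.stdout.decode())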
def selection_sort(collection):
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(selection_sort(unsorted))
338
1
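# Quick property check for selection_sort above: sorting any sample must agree
# with Python's built-in sorted().
import random

sample = random.sample(range(100), 10)
assert selection_sort(list(sample)) == sorted(sample)
print(selection_sort([5, 2, 9, 1]))  # [1, 2, 5, 9]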
from math import factorial class lowercase_ : """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Optional[int]: lowerCAmelCase = real if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): lowerCAmelCase = [1] * rank else: lowerCAmelCase = rank def __repr__( self ) ->Any: return ( F"{self.real}+" F"{'+'.join(str(__SCREAMING_SNAKE_CASE )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}" ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: lowerCAmelCase = self.duals.copy() while cur[-1] == 0: cur.pop(-1 ) return Dual(self.real , __SCREAMING_SNAKE_CASE ) def __add__( self , __SCREAMING_SNAKE_CASE ) ->Dict: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): return Dual(self.real + other , self.duals ) lowerCAmelCase = self.duals.copy() lowerCAmelCase = other.duals.copy() if len(__SCREAMING_SNAKE_CASE ) > len(__SCREAMING_SNAKE_CASE ): o_dual.extend([1] * (len(__SCREAMING_SNAKE_CASE ) - len(__SCREAMING_SNAKE_CASE )) ) elif len(__SCREAMING_SNAKE_CASE ) < len(__SCREAMING_SNAKE_CASE ): s_dual.extend([1] * (len(__SCREAMING_SNAKE_CASE ) - len(__SCREAMING_SNAKE_CASE )) ) lowerCAmelCase = [] for i in range(len(__SCREAMING_SNAKE_CASE ) ): new_duals.append(s_dual[i] + o_dual[i] ) return Dual(self.real + other.real , __SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[Any] = __add__ def __sub__( self , __SCREAMING_SNAKE_CASE ) ->str: return self + other * -1 def __mul__( self , __SCREAMING_SNAKE_CASE ) ->Any: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): lowerCAmelCase = [] for i in self.duals: new_duals.append(i * other ) return Dual(self.real * other , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = [0] * (len(self.duals ) + len(other.duals ) + 1) for i, item in enumerate(self.duals ): for j, jtem in enumerate(other.duals ): new_duals[i + j + 1] += item * jtem for k in range(len(self.duals ) ): new_duals[k] += self.duals[k] * other.real for index in range(len(other.duals ) ): new_duals[index] += other.duals[index] * self.real return Dual(self.real * other.real , __SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = __mul__ def __truediv__( self , __SCREAMING_SNAKE_CASE ) ->Optional[Any]: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): lowerCAmelCase = [] for i in self.duals: new_duals.append(i / other ) return Dual(self.real / other , __SCREAMING_SNAKE_CASE ) raise ValueError def __floordiv__( self , __SCREAMING_SNAKE_CASE ) ->Union[str, Any]: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): lowerCAmelCase = [] for i in self.duals: new_duals.append(i // other ) return Dual(self.real // other , __SCREAMING_SNAKE_CASE ) raise ValueError def __pow__( self , __SCREAMING_SNAKE_CASE ) ->Union[str, Any]: if n < 0 or isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise ValueError('''power must be a positive integer''' ) if n == 0: return 1 if n == 1: return self lowerCAmelCase = self for _ in range(n - 1 ): x *= self return x def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Tuple: if not callable(snake_case__ ): raise ValueError('''differentiate() requires a function as input for func''' ) if not isinstance(snake_case__ , (float, int) ): raise ValueError('''differentiate() requires a float as input for position''' ) if not isinstance(snake_case__ , snake_case__ ): raise ValueError('''differentiate() requires an int as input for order''' ) lowerCAmelCase = Dual(snake_case__ , 1 ) lowerCAmelCase = func(snake_case__ ) if order == 0: return 
result.real return result.duals[order - 1] * factorial(order ) if __name__ == "__main__": import doctest doctest.testmod() def f(y): return y**2 * y**4 print(differentiate(f, 9, 2))
338
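# The Dual class above implements forward-mode automatic differentiation: the
# k-th dual coefficient of func(Dual(x, 1)) carries f^(k)(x) / k!, which the
# file's differentiate() helper rescales by k!. A sketch assuming that helper:
print(differentiate(lambda x: x**4, 2, 1))  # 4 * 2**3  = 32
print(differentiate(lambda x: x**4, 2, 2))  # 12 * 2**2 = 48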
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class lowercase_ : """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=19 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.0_2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , ) ->Union[str, Any]: lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_input_mask lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope def SCREAMING_SNAKE_CASE_ ( self ) ->Any: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: lowerCAmelCase = EsmConfig( vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__SCREAMING_SNAKE_CASE , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , ) return config def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Tuple: lowerCAmelCase = EsmForProteinFolding(config=__SCREAMING_SNAKE_CASE ).float() 
model.to(__SCREAMING_SNAKE_CASE ) model.eval() lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = model(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) ) def SCREAMING_SNAKE_CASE_ ( self ) ->int: lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowercase_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = False UpperCAmelCase_ : Dict = (EsmForProteinFolding,) if is_torch_available() else () UpperCAmelCase_ : List[Any] = () UpperCAmelCase_ : Tuple = {} if is_torch_available() else {} UpperCAmelCase_ : List[str] = False def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: lowerCAmelCase = EsmFoldModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self ) ->Any: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) @unittest.skip('''Does not support attention outputs''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: pass @unittest.skip def SCREAMING_SNAKE_CASE_ ( self ) ->Any: pass @unittest.skip('''Esm does not support embedding resizing''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: pass @unittest.skip('''Esm does not support embedding resizing''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->str: pass @unittest.skip('''ESMFold does not support passing input embeds!''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->str: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: pass @unittest.skip('''ESMFold does not output hidden states in the normal way.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: pass @unittest.skip('''ESMfold does not output hidden states in the normal way.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: pass @unittest.skip('''ESMFold only has one output format.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: pass @unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: pass @unittest.skip('''ESMFold does not support input chunking.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: pass @unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def 
SCREAMING_SNAKE_CASE_ ( self ) ->str: pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Any: pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->str: pass @unittest.skip('''ESMFold doesn\'t support data parallel.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->str: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: pass @require_torch class lowercase_ ( UpperCamelCase_ ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE_ ( self ) ->str: lowerCAmelCase = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float() model.eval() lowerCAmelCase = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )['''positions'''] lowerCAmelCase = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
338
1
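# Standalone version of the slow integration check above: run ESMFold on a
# short token sequence and inspect the predicted atom positions. This needs
# the real facebook/esmfold_v1 weights, so it is illustrative rather than a
# unit test.
import torch
from transformers import EsmForProteinFolding

model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float().eval()
input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
with torch.no_grad():
    positions = model(input_ids)["positions"]
print(positions.shape)  # (num_recycles, batch, seq_len, atoms, 3) per the tester above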
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 lowercase__ : Union[str, Any] = data_utils.TransfoXLTokenizer lowercase__ : Tuple = data_utils.TransfoXLCorpus lowercase__ : Union[str, Any] = data_utils lowercase__ : Union[str, Any] = data_utils def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]: if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(snake_case__ , '''rb''' ) as fp: lowerCAmelCase = pickle.load(snake_case__ , encoding='''latin1''' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) lowerCAmelCase = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file'''] print(f"Save vocabulary to {pytorch_vocab_dump_path}" ) lowerCAmelCase = corpus.vocab.__dict__ torch.save(snake_case__ , snake_case__ ) lowerCAmelCase = corpus.__dict__ corpus_dict_no_vocab.pop('''vocab''' , snake_case__ ) lowerCAmelCase = pytorch_dump_folder_path + '''/''' + CORPUS_NAME print(f"Save dataset to {pytorch_dataset_dump_path}" ) torch.save(snake_case__ , snake_case__ ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model lowerCAmelCase = os.path.abspath(snake_case__ ) lowerCAmelCase = os.path.abspath(snake_case__ ) print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}." ) # Initialise PyTorch model if transfo_xl_config_file == "": lowerCAmelCase = TransfoXLConfig() else: lowerCAmelCase = TransfoXLConfig.from_json_file(snake_case__ ) print(f"Building PyTorch model from configuration: {config}" ) lowerCAmelCase = TransfoXLLMHeadModel(snake_case__ ) lowerCAmelCase = load_tf_weights_in_transfo_xl(snake_case__ , snake_case__ , snake_case__ ) # Save pytorch-model lowerCAmelCase = os.path.join(snake_case__ , snake_case__ ) lowerCAmelCase = os.path.join(snake_case__ , snake_case__ ) print(f"Save PyTorch model to {os.path.abspath(snake_case__ )}" ) torch.save(model.state_dict() , snake_case__ ) print(f"Save configuration file to {os.path.abspath(snake_case__ )}" ) with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowercase__ : str = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the folder to store the PyTorch model or dataset/vocab.''', ) parser.add_argument( '''--tf_checkpoint_path''', default='''''', type=str, help='''An optional path to a TensorFlow checkpoint path to be converted.''', ) parser.add_argument( '''--transfo_xl_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained BERT model. 
\n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--transfo_xl_dataset_file''', default='''''', type=str, help='''An optional dataset file to be converted into a vocabulary.''', ) lowercase__ : Optional[int] = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
338
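# Example invocation of the conversion script above (the script name and all
# paths are placeholders); the resulting folder then loads like any other
# checkpoint:
#
#   python convert_transfo_xl_checkpoint.py \
#       --tf_checkpoint_path ./transfo-xl/model.ckpt \
#       --transfo_xl_config_file ./transfo-xl/config.json \
#       --pytorch_dump_folder_path ./transfo-xl-pytorch
#
from transformers import TransfoXLLMHeadModel

model = TransfoXLLMHeadModel.from_pretrained("./transfo-xl-pytorch")  # placeholder path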
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : List[str] = ["""image_processor""", """tokenizer"""] UpperCAmelCase_ : int = """OwlViTImageProcessor""" UpperCAmelCase_ : Any = ("""CLIPTokenizer""", """CLIPTokenizerFast""") def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->Any: lowerCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __SCREAMING_SNAKE_CASE , ) lowerCAmelCase = kwargs.pop('''feature_extractor''' ) lowerCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="max_length" , __SCREAMING_SNAKE_CASE="np" , **__SCREAMING_SNAKE_CASE ) ->int: if text is None and query_images is None and images is None: raise ValueError( '''You have to specify at least one text or query image or image. All three cannot be none.''' ) if text is not None: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or (isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not isinstance(text[0] , __SCREAMING_SNAKE_CASE )): lowerCAmelCase = [self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )] elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(text[0] , __SCREAMING_SNAKE_CASE ): lowerCAmelCase = [] # Maximum number of queries across batch lowerCAmelCase = max([len(__SCREAMING_SNAKE_CASE ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__SCREAMING_SNAKE_CASE ) != max_num_queries: lowerCAmelCase = t + [''' '''] * (max_num_queries - len(__SCREAMING_SNAKE_CASE )) lowerCAmelCase = self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) encodings.append(__SCREAMING_SNAKE_CASE ) else: raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' ) if return_tensors == "np": lowerCAmelCase = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) lowerCAmelCase = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp lowerCAmelCase = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) lowerCAmelCase = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch lowerCAmelCase = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 ) lowerCAmelCase = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf 
lowerCAmelCase = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) lowerCAmelCase = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) else: raise ValueError('''Target return tensor type could not be returned''' ) lowerCAmelCase = BatchEncoding() lowerCAmelCase = input_ids lowerCAmelCase = attention_mask if query_images is not None: lowerCAmelCase = BatchEncoding() lowerCAmelCase = self.image_processor( __SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).pixel_values lowerCAmelCase = query_pixel_values if images is not None: lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if text is not None and images is not None: lowerCAmelCase = image_features.pixel_values return encoding elif query_images is not None and images is not None: lowerCAmelCase = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__SCREAMING_SNAKE_CASE ) , tensor_type=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Optional[int]: return self.image_processor.post_process(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Any: return self.image_processor.post_process_object_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Tuple: return self.image_processor.post_process_image_guided_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->str: return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->List[Any]: return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) @property def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __SCREAMING_SNAKE_CASE , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE_ ( self ) ->int: warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __SCREAMING_SNAKE_CASE , ) return self.image_processor
338
1
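# The OwlViT processor above pads every sample of a nested text batch to the
# same number of queries before tokenizing, keeping the stacked tensors
# rectangular. Illustrative usage (query strings and image are made up):
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.new("RGB", (640, 480))  # stand-in image
texts = [["a photo of a cat", "a photo of a dog"], ["a remote"]]  # ragged: 2 vs 1 queries
inputs = processor(text=texts, images=image, return_tensors="pt")
print(inputs["input_ids"].shape)  # both samples padded to two queries each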
from ...configuration_utils import PretrainedConfig lowercase__ : Any = { '''google/tapas-base-finetuned-sqa''': ( '''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json''' ), '''google/tapas-base-finetuned-wtq''': ( '''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json''' ), '''google/tapas-base-finetuned-wikisql-supervised''': ( '''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json''' ), '''google/tapas-base-finetuned-tabfact''': ( '''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json''' ), } class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : str = """tapas""" def __init__( self , __SCREAMING_SNAKE_CASE=30522 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1024 , __SCREAMING_SNAKE_CASE=[3, 256, 256, 2, 256, 256, 10] , __SCREAMING_SNAKE_CASE=0.0_2 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=1_0.0 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="ratio" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) ->str: super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_sizes lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps # Fine-tuning task hyperparameters lowerCAmelCase = positive_label_weight lowerCAmelCase = num_aggregation_labels lowerCAmelCase = aggregation_loss_weight lowerCAmelCase = use_answer_as_supervision lowerCAmelCase = answer_loss_importance lowerCAmelCase = use_normalized_answer_loss lowerCAmelCase = huber_loss_delta lowerCAmelCase = temperature lowerCAmelCase = aggregation_temperature lowerCAmelCase = use_gumbel_for_cells lowerCAmelCase = use_gumbel_for_aggregation lowerCAmelCase = average_approximation_function lowerCAmelCase = cell_selection_preference lowerCAmelCase = answer_loss_cutoff lowerCAmelCase = max_num_rows lowerCAmelCase = max_num_columns lowerCAmelCase = average_logits_per_cell lowerCAmelCase = select_one_column lowerCAmelCase = allow_empty_column_selection lowerCAmelCase = init_cell_selection_weights_to_zero lowerCAmelCase = reset_position_index_per_cell lowerCAmelCase = disable_per_token_loss # Aggregation hyperparameters lowerCAmelCase = aggregation_labels lowerCAmelCase = no_aggregation_label_index if 
isinstance(self.aggregation_labels , __SCREAMING_SNAKE_CASE ): lowerCAmelCase = {int(__SCREAMING_SNAKE_CASE ): v for k, v in aggregation_labels.items()}
338
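# The config above is BERT plus TAPAS-specific fine-tuning knobs. A short
# sketch of overriding a few of them (the values are illustrative, WTQ-style):
from transformers import TapasConfig, TapasForQuestionAnswering

config = TapasConfig(
    num_aggregation_labels=4,
    aggregation_labels={0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"},
    use_answer_as_supervision=True,
)
model = TapasForQuestionAnswering(config)
print(config.num_aggregation_labels, config.aggregation_labels[1])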
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowercase__ : List[Any] = logging.get_logger(__name__) lowercase__ : Optional[Any] = {'''vocab_file''': '''spiece.model'''} lowercase__ : Optional[int] = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', } } lowercase__ : Any = { '''albert-base-v1''': 5_1_2, '''albert-large-v1''': 5_1_2, '''albert-xlarge-v1''': 5_1_2, '''albert-xxlarge-v1''': 5_1_2, '''albert-base-v2''': 5_1_2, '''albert-large-v2''': 5_1_2, '''albert-xlarge-v2''': 5_1_2, '''albert-xxlarge-v2''': 5_1_2, } lowercase__ : Tuple = '''▁''' class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : Dict = VOCAB_FILES_NAMES UpperCAmelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) ->None: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
lowerCAmelCase = ( AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE , normalized=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token ) lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) lowerCAmelCase = do_lower_case lowerCAmelCase = remove_space lowerCAmelCase = keep_accents lowerCAmelCase = vocab_file lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__SCREAMING_SNAKE_CASE ) @property def SCREAMING_SNAKE_CASE_ ( self ) ->Any: return len(self.sp_model ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: lowerCAmelCase = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) ->int: lowerCAmelCase = self.__dict__.copy() lowerCAmelCase = None return state def __setstate__( self , __SCREAMING_SNAKE_CASE ) ->Tuple: lowerCAmelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowerCAmelCase = {} lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Any: if self.remove_space: lowerCAmelCase = ''' '''.join(inputs.strip().split() ) else: lowerCAmelCase = inputs lowerCAmelCase = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' ) if not self.keep_accents: lowerCAmelCase = unicodedata.normalize('''NFKD''' , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = ''''''.join([c for c in outputs if not unicodedata.combining(__SCREAMING_SNAKE_CASE )] ) if self.do_lower_case: lowerCAmelCase = outputs.lower() return outputs def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->List[str]: lowerCAmelCase = self.preprocess_text(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = [] for piece in pieces: if len(__SCREAMING_SNAKE_CASE ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit(): lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(__SCREAMING_SNAKE_CASE , '''''' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowerCAmelCase = cur_pieces[1:] else: lowerCAmelCase = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__SCREAMING_SNAKE_CASE ) else: new_pieces.append(__SCREAMING_SNAKE_CASE ) return new_pieces def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->int: return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->int: return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Optional[int]: lowerCAmelCase = [] lowerCAmelCase = '''''' lowerCAmelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " 
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token lowerCAmelCase = True lowerCAmelCase = [] else: current_sub_tokens.append(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = False out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) return out_string.strip() def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->List[int]: lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ) ->List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE ) if token_ids_a is not None: return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->List[int]: lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->Tuple[str]: if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return lowerCAmelCase = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi: lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
338
1
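# Usage sketch for the SentencePiece tokenizer above: pieces carry the "▁"
# (U+2581) word-boundary marker, and sentence pairs get [CLS] ... [SEP] ... [SEP]
# plus 0/1 token_type_ids, exactly as built by the helpers above.
from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
print(tokenizer.tokenize("Hello, world!"))
encoded = tokenizer("first sentence", "second one")
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))
print(encoded["token_type_ids"])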
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class lowercase_ ( unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = JukeboxTokenizer UpperCAmelCase_ : Any = { """artist""": """Zac Brown Band""", """genres""": """Country""", """lyrics""": """I met a traveller from an antique land, Who said \"Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away """, } @require_torch def SCREAMING_SNAKE_CASE_ ( self ) ->Any: import torch lowerCAmelCase = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' ) lowerCAmelCase = tokenizer(**self.metas )['''input_ids'''] # fmt: off lowerCAmelCase = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 
45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: import torch lowerCAmelCase = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' ) lowerCAmelCase = tokenizer(**self.metas )['''input_ids'''] # fmt: off lowerCAmelCase = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 
45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
338
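# Compact version of the tokenizer call that produced the expected tensors in
# the tests above (lyrics truncated; per those tests the result is one tensor
# per prior level):
from transformers import JukeboxTokenizer

tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
tokens = tokenizer(artist="Zac Brown Band", genres="Country", lyrics="I met a traveller from an antique land")["input_ids"]
print([t.shape for t in tokens])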
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = (DEISMultistepScheduler,) UpperCAmelCase_ : int = (("""num_inference_steps""", 25),) def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->str: lowerCAmelCase = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''solver_order''': 2, } config.update(**__SCREAMING_SNAKE_CASE ) return config def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ) ->Tuple: lowerCAmelCase = dict(self.forward_default_kwargs ) lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.dummy_sample lowerCAmelCase = 0.1 * sample lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) # copy over dummy past residuals lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE ) new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) # copy over dummy past residuals lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] lowerCAmelCase , lowerCAmelCase = sample, sample for t in range(__SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ): lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample lowerCAmelCase = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: pass def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ) ->List[Any]: lowerCAmelCase = dict(self.forward_default_kwargs ) lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.dummy_sample lowerCAmelCase = 0.1 * sample lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) # copy over dummy past residuals (must be after setting timesteps) lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE ) # copy over dummy past residuals new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) # copy over dummy past residual (must be after setting timesteps) lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample 
lowerCAmelCase = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->List[Any]: if scheduler is None: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = 10 lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample return sample def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: lowerCAmelCase = dict(self.forward_default_kwargs ) lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE ) for scheduler_class in self.scheduler_classes: lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.dummy_sample lowerCAmelCase = 0.1 * sample if num_inference_steps is not None and hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ): scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) elif num_inference_steps is not None and not hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ): lowerCAmelCase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] lowerCAmelCase = scheduler.timesteps[5] lowerCAmelCase = scheduler.timesteps[6] lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE_ ( self ) ->int: # make sure that iterating over schedulers with same config names gives same results # for defaults lowerCAmelCase = DEISMultistepScheduler(**self.get_scheduler_config() ) lowerCAmelCase = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) lowerCAmelCase = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( 
self ) ->int: self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , algorithm_type='''deis''' , solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , ) lowerCAmelCase = self.full_loop( solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , ) assert not torch.isnan(__SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers" def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE ) self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=__SCREAMING_SNAKE_CASE , time_step=0 ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: lowerCAmelCase = self.full_loop() lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: lowerCAmelCase = self.full_loop(prediction_type='''v_prediction''' ) lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3 def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config(thresholding=__SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 ) lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = 10 lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample assert sample.dtype == torch.floataa
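# --- Hedged usage sketch (not part of the test suite above): the loop below shows the
# scheduler API the tests exercise -- set_timesteps(), iterating scheduler.timesteps,
# and step(...).prev_sample -- plus config transfer between multistep schedulers via
# from_config(). Tensors are random stand-ins for a UNet and its latents.
import torch
from diffusers import DEISMultistepScheduler, DPMSolverMultistepScheduler

scheduler = DEISMultistepScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)  # stand-in for a noisy latent
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # stand-in for a model prediction
    sample = scheduler.step(model_output, t, sample).prev_sample

# The same config can seed an equivalent multistep scheduler, as the
# interchangeability test above does:
dpm = DPMSolverMultistepScheduler.from_config(scheduler.config)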
import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class lowercase_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Any = IFInpaintingSuperResolutionPipeline UpperCAmelCase_ : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} UpperCAmelCase_ : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} ) UpperCAmelCase_ : int = PipelineTesterMixin.required_optional_params - {"""latents"""} def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: return self._get_superresolution_dummy_components() def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) ->Any: if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ): lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: self._test_save_load_local() def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
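# --- Hedged usage sketch: constructing the same dummy inputs outside the test harness.
# floats_tensor() is the helper this file already imports; a real pipeline run needs the
# gated DeepFloyd IF checkpoints, so the load is only indicated in a trailing comment.
import random

import torch
from diffusers.utils import floats_tensor

generator = torch.Generator().manual_seed(0)
image = floats_tensor((1, 3, 16, 16), rng=random.Random(0))           # low-res input
original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(0))  # stage-1 output
mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(0))      # inpainting mask
# pipe = IFInpaintingSuperResolutionPipeline.from_pretrained("DeepFloyd/IF-II-L-v1.0")  # gated weights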
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowercase_ ( unittest.TestCase ): """simple docstring""" @property def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: torch.manual_seed(0 ) lowerCAmelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def SCREAMING_SNAKE_CASE_ ( self ) ->int: lowerCAmelCase = self.dummy_uncond_unet lowerCAmelCase = KarrasVeScheduler() lowerCAmelCase = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' , return_dict=__SCREAMING_SNAKE_CASE )[0] lowerCAmelCase = image[0, -3:, -3:, -1] lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCAmelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class lowercase_ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: lowerCAmelCase = '''google/ncsnpp-celebahq-256''' lowerCAmelCase = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = KarrasVeScheduler() lowerCAmelCase = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe(num_inference_steps=20 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) lowerCAmelCase = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
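# --- Hedged sketch of the fast path above with standard diffusers names, for diffusers
# versions that still ship KarrasVePipeline: a tiny UNet2DModel plus KarrasVeScheduler,
# two inference steps, runnable on CPU. Mirrors the dummy model used by the test.
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

torch.manual_seed(0)
unet = UNet2DModel(
    block_out_channels=(32, 64), layers_per_block=2, sample_size=32,
    in_channels=3, out_channels=3,
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
images = pipe(num_inference_steps=2, generator=torch.manual_seed(0), output_type="numpy").images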
import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class lowercase_ ( unittest.TestCase ): """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 32 , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = 1 / 255 , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , __SCREAMING_SNAKE_CASE = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=3 , ) ->int: lowerCAmelCase = parent lowerCAmelCase = do_resize lowerCAmelCase = size if size is not None else {'''shortest_edge''': 288} lowerCAmelCase = size_divisor lowerCAmelCase = do_rescale lowerCAmelCase = rescale_factor lowerCAmelCase = do_normalize lowerCAmelCase = do_center_crop lowerCAmelCase = image_mean lowerCAmelCase = image_std lowerCAmelCase = do_pad lowerCAmelCase = batch_size lowerCAmelCase = num_channels lowerCAmelCase = min_resolution lowerCAmelCase = max_resolution def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ) ->Optional[Any]: if not batched: lowerCAmelCase = self.size['''shortest_edge'''] lowerCAmelCase = image_inputs[0] if isinstance(__SCREAMING_SNAKE_CASE , Image.Image ): lowerCAmelCase , lowerCAmelCase = image.size else: lowerCAmelCase , lowerCAmelCase = image.shape[1], image.shape[2] lowerCAmelCase = size / min(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if h < w: lowerCAmelCase , lowerCAmelCase = size, scale * w else: lowerCAmelCase , lowerCAmelCase = scale * h, size lowerCAmelCase = int((1333 / 800) * size ) if max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) > max_size: lowerCAmelCase = max_size / max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = newh * scale lowerCAmelCase = neww * scale lowerCAmelCase , lowerCAmelCase = int(newh + 0.5 ), int(neww + 0.5 ) lowerCAmelCase , lowerCAmelCase = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: lowerCAmelCase = [] for image in image_inputs: lowerCAmelCase , lowerCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowerCAmelCase = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[0] )[0] lowerCAmelCase = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowercase_ ( UpperCamelCase_ , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Dict = BridgeTowerImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: lowerCAmelCase = 
BridgeTowerImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size_divisor''' ) ) def SCREAMING_SNAKE_CASE_ ( self ) ->Any: pass def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: # Initialize image processor lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values lowerCAmelCase , lowerCAmelCase = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values lowerCAmelCase , lowerCAmelCase = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: # Initialize image processor lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values lowerCAmelCase , lowerCAmelCase = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values lowerCAmelCase , lowerCAmelCase = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: # Initialize image processor lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , 
return_tensors='''pt''' ).pixel_values lowerCAmelCase , lowerCAmelCase = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values lowerCAmelCase , lowerCAmelCase = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
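# --- Hedged usage sketch: the processor resizes so the shortest edge matches `size`
# (288 here) and rounds dimensions with `size_divisor`, which is what
# get_expected_values() above recomputes by hand. The PIL image is a dummy.
from PIL import Image

from transformers import BridgeTowerImageProcessor

processor = BridgeTowerImageProcessor(size={"shortest_edge": 288}, size_divisor=32)
img = Image.new("RGB", (640, 480))
pixel_values = processor(images=img, return_tensors="pt").pixel_values
print(pixel_values.shape)  # height and width come out as multiples of size_divisor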
from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch lowercase__ : Dict = logging.get_logger(__name__) @add_end_docstrings( UpperCamelCase_ , r""" top_k (`int`, defaults to 5): The number of predictions to return. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). """ , ) class lowercase_ ( UpperCamelCase_ ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->np.ndarray: if self.framework == "tf": lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": lowerCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE ) else: raise ValueError('''Unsupported framework''' ) return masked_index def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->np.ndarray: lowerCAmelCase = self.get_masked_index(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( '''fill-mask''' , self.model.base_model_prefix , F"No mask_token ({self.tokenizer.mask_token}) found on the input" , ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->str: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->Dict[str, GenericTensor]: if return_tensors is None: lowerCAmelCase = self.framework lowerCAmelCase = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE ) self.ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE ) return model_inputs def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Tuple: lowerCAmelCase = self.model(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = model_inputs['''input_ids'''] return model_outputs def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=None ) ->str: # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: lowerCAmelCase = target_ids.shape[0] lowerCAmelCase = model_outputs['''input_ids'''][0] lowerCAmelCase = model_outputs['''logits'''] if self.framework == "tf": lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] lowerCAmelCase = outputs.numpy() lowerCAmelCase = outputs[0, masked_index, :] lowerCAmelCase = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 ) if target_ids is not None: lowerCAmelCase = tf.gather_nd(tf.squeeze(__SCREAMING_SNAKE_CASE , 0 ) , target_ids.reshape(-1 , 1 ) ) lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE , 0 ) lowerCAmelCase = tf.math.top_k(__SCREAMING_SNAKE_CASE , k=__SCREAMING_SNAKE_CASE ) lowerCAmelCase , lowerCAmelCase = topk.values.numpy(), topk.indices.numpy() else: lowerCAmelCase = torch.nonzero(input_ids == 
self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample lowerCAmelCase = outputs[0, masked_index, :] lowerCAmelCase = logits.softmax(dim=-1 ) if target_ids is not None: lowerCAmelCase = probs[..., target_ids] lowerCAmelCase , lowerCAmelCase = probs.topk(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = [] lowerCAmelCase = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): lowerCAmelCase = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this array in place lowerCAmelCase = input_ids.numpy().copy() if target_ids is not None: lowerCAmelCase = target_ids[p].tolist() lowerCAmelCase = p # Filter padding out: lowerCAmelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back lowerCAmelCase = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence} row.append(__SCREAMING_SNAKE_CASE ) result.append(__SCREAMING_SNAKE_CASE ) if single_mask: return result[0] return result def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): lowerCAmelCase = [targets] try: lowerCAmelCase = self.tokenizer.get_vocab() except Exception: lowerCAmelCase = {} lowerCAmelCase = [] for target in targets: lowerCAmelCase = vocab.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if id_ is None: lowerCAmelCase = self.tokenizer( __SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , max_length=1 , truncation=__SCREAMING_SNAKE_CASE , )['''input_ids'''] if len(__SCREAMING_SNAKE_CASE ) == 0: logger.warning( F"The specified target token `{target}` does not exist in the model vocabulary. " '''We cannot replace it with anything meaningful, ignoring it''' ) continue lowerCAmelCase = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( F"The specified target token `{target}` does not exist in the model vocabulary. " F"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." 
) target_ids.append(id_ ) lowerCAmelCase = list(set(__SCREAMING_SNAKE_CASE ) ) if len(__SCREAMING_SNAKE_CASE ) == 0: raise ValueError('''At least one target must be provided when passed.''' ) lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE ) return target_ids def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ) ->Dict: lowerCAmelCase = {} if targets is not None: lowerCAmelCase = self.get_target_ids(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = target_ids if top_k is not None: lowerCAmelCase = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( '''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' ) return {}, {}, postprocess_params def __call__( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->List[Any]: lowerCAmelCase = super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) == 1: return outputs[0] return outputs
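# --- Hedged usage sketch for the pipeline implemented above. The checkpoint name is an
# example, not something this file pins; `targets` restricts scoring to given tokens and
# `top_k` caps the number of candidates, matching the parameters handled above.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="distilbert-base-uncased")  # assumed checkpoint
print(fill_mask("Paris is the [MASK] of France.", top_k=3))
print(fill_mask("Paris is the [MASK] of France.", targets=["capital", "city"]))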
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    '''configuration_nllb_moe''': [
        '''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''NllbMoeConfig''',
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_nllb_moe'''] = [
        '''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''NllbMoeForConditionalGeneration''',
        '''NllbMoeModel''',
        '''NllbMoePreTrainedModel''',
        '''NllbMoeTop2Router''',
        '''NllbMoeSparseMLP''',
    ]


if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
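# --- Hedged sketch of the idea behind _LazyModule, using only plain PEP 562 module
# __getattr__ (no transformers internals): dropped into a package __init__.py, attribute
# access triggers the submodule import, so importing the package itself stays cheap.
import importlib

_LAZY = {"NllbMoeConfig": ".configuration_nllb_moe"}  # symbol -> submodule, as in _import_structure

def __getattr__(name):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")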
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}


if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import contextlib import copy import random from typing import Any, Dict, Iterable, Optional, Union import numpy as np import torch from .utils import deprecate, is_transformers_available if is_transformers_available(): import transformers def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Any: random.seed(snake_case__ ) np.random.seed(snake_case__ ) torch.manual_seed(snake_case__ ) torch.cuda.manual_seed_all(snake_case__ ) # ^^ safe to call this function even if cuda is not available class lowercase_ : """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0.9_9_9_9 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = 1.0 , __SCREAMING_SNAKE_CASE = 2 / 3 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) ->Dict: if isinstance(__SCREAMING_SNAKE_CASE , torch.nn.Module ): lowerCAmelCase = ( '''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. ''' '''Please pass the parameters of the module instead.''' ) deprecate( '''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , __SCREAMING_SNAKE_CASE , standard_warn=__SCREAMING_SNAKE_CASE , ) lowerCAmelCase = parameters.parameters() # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility lowerCAmelCase = True if kwargs.get('''max_value''' , __SCREAMING_SNAKE_CASE ) is not None: lowerCAmelCase = '''The `max_value` argument is deprecated. Please use `decay` instead.''' deprecate('''max_value''' , '''1.0.0''' , __SCREAMING_SNAKE_CASE , standard_warn=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = kwargs['''max_value'''] if kwargs.get('''min_value''' , __SCREAMING_SNAKE_CASE ) is not None: lowerCAmelCase = '''The `min_value` argument is deprecated. Please use `min_decay` instead.''' deprecate('''min_value''' , '''1.0.0''' , __SCREAMING_SNAKE_CASE , standard_warn=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = kwargs['''min_value'''] lowerCAmelCase = list(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = [p.clone().detach() for p in parameters] if kwargs.get('''device''' , __SCREAMING_SNAKE_CASE ) is not None: lowerCAmelCase = '''The `device` argument is deprecated. 
Please use `to` instead.''' deprecate('''device''' , '''1.0.0''' , __SCREAMING_SNAKE_CASE , standard_warn=__SCREAMING_SNAKE_CASE ) self.to(device=kwargs['''device'''] ) lowerCAmelCase = None lowerCAmelCase = decay lowerCAmelCase = min_decay lowerCAmelCase = update_after_step lowerCAmelCase = use_ema_warmup lowerCAmelCase = inv_gamma lowerCAmelCase = power lowerCAmelCase = 0 lowerCAmelCase = None # set in `step()` lowerCAmelCase = model_cls lowerCAmelCase = model_config @classmethod def SCREAMING_SNAKE_CASE_ ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->"EMAModel": lowerCAmelCase , lowerCAmelCase = model_cls.load_config(__SCREAMING_SNAKE_CASE , return_unused_kwargs=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = model_cls.from_pretrained(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = cls(model.parameters() , model_cls=__SCREAMING_SNAKE_CASE , model_config=model.config ) ema_model.load_state_dict(__SCREAMING_SNAKE_CASE ) return ema_model def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Tuple: if self.model_cls is None: raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' ) if self.model_config is None: raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' ) lowerCAmelCase = self.model_cls.from_config(self.model_config ) lowerCAmelCase = self.state_dict() state_dict.pop('''shadow_params''' , __SCREAMING_SNAKE_CASE ) model.register_to_config(**__SCREAMING_SNAKE_CASE ) self.copy_to(model.parameters() ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->float: lowerCAmelCase = max(0 , optimization_step - self.update_after_step - 1 ) if step <= 0: return 0.0 if self.use_ema_warmup: lowerCAmelCase = 1 - (1 + step / self.inv_gamma) ** -self.power else: lowerCAmelCase = (1 + step) / (10 + step) lowerCAmelCase = min(__SCREAMING_SNAKE_CASE , self.decay ) # make sure decay is not smaller than min_decay lowerCAmelCase = max(__SCREAMING_SNAKE_CASE , self.min_decay ) return cur_decay_value @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->List[Any]: if isinstance(__SCREAMING_SNAKE_CASE , torch.nn.Module ): lowerCAmelCase = ( '''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. ''' '''Please pass the parameters of the module instead.''' ) deprecate( '''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , __SCREAMING_SNAKE_CASE , standard_warn=__SCREAMING_SNAKE_CASE , ) lowerCAmelCase = parameters.parameters() lowerCAmelCase = list(__SCREAMING_SNAKE_CASE ) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. 
lowerCAmelCase = self.get_decay(self.optimization_step ) lowerCAmelCase = decay lowerCAmelCase = 1 - decay lowerCAmelCase = contextlib.nullcontext if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): import deepspeed for s_param, param in zip(self.shadow_params , __SCREAMING_SNAKE_CASE ): if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): lowerCAmelCase = deepspeed.zero.GatheredParameters(__SCREAMING_SNAKE_CASE , modifier_rank=__SCREAMING_SNAKE_CASE ) with context_manager(): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param) ) else: s_param.copy_(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->None: lowerCAmelCase = list(__SCREAMING_SNAKE_CASE ) for s_param, param in zip(self.shadow_params , __SCREAMING_SNAKE_CASE ): param.data.copy_(s_param.to(param.device ).data ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ) ->None: lowerCAmelCase = [ p.to(device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE ) if p.is_floating_point() else p.to(device=__SCREAMING_SNAKE_CASE ) for p in self.shadow_params ] def SCREAMING_SNAKE_CASE_ ( self ) ->dict: return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->None: lowerCAmelCase = [param.detach().cpu().clone() for param in parameters] def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->None: if self.temp_stored_params is None: raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' ) for c_param, param in zip(self.temp_stored_params , __SCREAMING_SNAKE_CASE ): param.data.copy_(c_param.data ) # Better memory-wise. 
lowerCAmelCase = None def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->None: lowerCAmelCase = copy.deepcopy(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = state_dict.get('''decay''' , self.decay ) if self.decay < 0.0 or self.decay > 1.0: raise ValueError('''Decay must be between 0 and 1''' ) lowerCAmelCase = state_dict.get('''min_decay''' , self.min_decay ) if not isinstance(self.min_decay , __SCREAMING_SNAKE_CASE ): raise ValueError('''Invalid min_decay''' ) lowerCAmelCase = state_dict.get('''optimization_step''' , self.optimization_step ) if not isinstance(self.optimization_step , __SCREAMING_SNAKE_CASE ): raise ValueError('''Invalid optimization_step''' ) lowerCAmelCase = state_dict.get('''update_after_step''' , self.update_after_step ) if not isinstance(self.update_after_step , __SCREAMING_SNAKE_CASE ): raise ValueError('''Invalid update_after_step''' ) lowerCAmelCase = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup ) if not isinstance(self.use_ema_warmup , __SCREAMING_SNAKE_CASE ): raise ValueError('''Invalid use_ema_warmup''' ) lowerCAmelCase = state_dict.get('''inv_gamma''' , self.inv_gamma ) if not isinstance(self.inv_gamma , (float, int) ): raise ValueError('''Invalid inv_gamma''' ) lowerCAmelCase = state_dict.get('''power''' , self.power ) if not isinstance(self.power , (float, int) ): raise ValueError('''Invalid power''' ) lowerCAmelCase = state_dict.get('''shadow_params''' , __SCREAMING_SNAKE_CASE ) if shadow_params is not None: lowerCAmelCase = shadow_params if not isinstance(self.shadow_params , __SCREAMING_SNAKE_CASE ): raise ValueError('''shadow_params must be a list''' ) if not all(isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) for p in self.shadow_params ): raise ValueError('''shadow_params must all be Tensors''' )
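# --- Hedged usage sketch, assuming the class above is diffusers' EMAModel (importable
# from diffusers.training_utils): step() after each optimizer update keeps the shadow
# weights fresh; store()/copy_to()/restore() swap EMA weights in for evaluation.
import torch
from diffusers.training_utils import EMAModel

model = torch.nn.Linear(4, 4)
opt = torch.optim.SGD(model.parameters(), lr=1e-2)
ema = EMAModel(model.parameters(), decay=0.9999)

for _ in range(10):
    loss = model(torch.randn(2, 4)).pow(2).mean()
    opt.zero_grad()
    loss.backward()
    opt.step()
    ema.step(model.parameters())   # update shadow parameters

ema.store(model.parameters())      # stash current training weights
ema.copy_to(model.parameters())    # evaluate with the averaged weights
ema.restore(model.parameters())    # put the training weights back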
LETTERS = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''


def main() -> None:
    message = input('''Enter message: ''')
    key = input('''Enter key [alphanumeric]: ''')
    mode = input('''Encrypt/Decrypt [e/d]: ''')

    if mode.lower().startswith('''e'''):
        mode = '''encrypt'''
        translated = encrypt_message(key, message)
    elif mode.lower().startswith('''d'''):
        mode = '''decrypt'''
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, '''encrypt''')


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, '''decrypt''')


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)


if __name__ == "__main__":
    main()
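# --- Round-trip check for the cipher above: decrypting an encryption with the same key
# restores the plaintext, and non-letters pass through unchanged.
ciphertext = encrypt_message("LEMON", "Attack at dawn")
print(ciphertext)  # Lxfopv ef rnhr
assert decrypt_message("LEMON", ciphertext) == "Attack at dawn"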
from __future__ import annotations lowercase__ : Optional[int] = tuple[int, int, int] lowercase__ : str = tuple[str, str, str] # used alphabet -------------------------- # from string.ascii_uppercase lowercase__ : Tuple = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ''' # -------------------------- default selection -------------------------- # rotors -------------------------- lowercase__ : Tuple = '''EGZWVONAHDCLFQMSIPJBYUKXTR''' lowercase__ : Optional[Any] = '''FOBHMDKEXQNRAULPGSJVTYICZW''' lowercase__ : Tuple = '''ZJXESIUQLHAVRMDOYGTNFWPBKC''' # reflector -------------------------- lowercase__ : List[str] = { '''A''': '''N''', '''N''': '''A''', '''B''': '''O''', '''O''': '''B''', '''C''': '''P''', '''P''': '''C''', '''D''': '''Q''', '''Q''': '''D''', '''E''': '''R''', '''R''': '''E''', '''F''': '''S''', '''S''': '''F''', '''G''': '''T''', '''T''': '''G''', '''H''': '''U''', '''U''': '''H''', '''I''': '''V''', '''V''': '''I''', '''J''': '''W''', '''W''': '''J''', '''K''': '''X''', '''X''': '''K''', '''L''': '''Y''', '''Y''': '''L''', '''M''': '''Z''', '''Z''': '''M''', } # -------------------------- extra rotors -------------------------- lowercase__ : Dict = '''RMDJXFUWGISLHVTCQNKYPBEZOA''' lowercase__ : Optional[Any] = '''SGLCPQWZHKXAREONTFBVIYJUDM''' lowercase__ : Any = '''HVSICLTYKQUBXDWAJZOMFGPREN''' lowercase__ : Union[str, Any] = '''RZWQHFMVDBKICJLNTUXAGYPSOE''' lowercase__ : Dict = '''LFKIJODBEGAMQPXVUHYSTCZRWN''' lowercase__ : Optional[int] = '''KOAEGVDHXPQZMLFTYWJNBRCIUS''' def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]: # Checks if there are 3 unique rotors if (unique_rotsel := len(set(snake_case__ ) )) < 3: lowerCAmelCase = f"Please use 3 unique rotors (not {unique_rotsel})" raise Exception(snake_case__ ) # Checks if rotor positions are valid lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = rotpos if not 0 < rotorposa <= len(snake_case__ ): lowerCAmelCase = f"First rotor position is not within range of 1..26 ({rotorposa}" raise ValueError(snake_case__ ) if not 0 < rotorposa <= len(snake_case__ ): lowerCAmelCase = f"Second rotor position is not within range of 1..26 ({rotorposa})" raise ValueError(snake_case__ ) if not 0 < rotorposa <= len(snake_case__ ): lowerCAmelCase = f"Third rotor position is not within range of 1..26 ({rotorposa})" raise ValueError(snake_case__ ) # Validates string and returns dict lowerCAmelCase = _plugboard(snake_case__ ) return rotpos, rotsel, pbdict def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> dict[str, str]: # tests the input string if it # a) is type string # b) has even length (so pairs can be made) if not isinstance(snake_case__ , snake_case__ ): lowerCAmelCase = f"Plugboard setting isn't type string ({type(snake_case__ )})" raise TypeError(snake_case__ ) elif len(snake_case__ ) % 2 != 0: lowerCAmelCase = f"Odd number of symbols ({len(snake_case__ )})" raise Exception(snake_case__ ) elif pbstring == "": return {} pbstring.replace(''' ''' , '''''' ) # Checks if all characters are unique lowerCAmelCase = set() for i in pbstring: if i not in abc: lowerCAmelCase = f"'{i}' not in list of symbols" raise Exception(snake_case__ ) elif i in tmppbl: lowerCAmelCase = f"Duplicate symbol ({i})" raise Exception(snake_case__ ) else: tmppbl.add(snake_case__ ) del tmppbl # Created the dictionary lowerCAmelCase = {} for j in range(0 , len(snake_case__ ) - 1 , 2 ): lowerCAmelCase = pbstring[j + 1] lowerCAmelCase = pbstring[j] return pb def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , 
snake_case__ = (rotora, rotora, rotora) , snake_case__ = "" , ) -> str: lowerCAmelCase = text.upper() lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = _validator( snake_case__ , snake_case__ , plugb.upper() ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = rotor_position lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = rotor_selection rotorposa -= 1 rotorposa -= 1 rotorposa -= 1 lowerCAmelCase = [] # encryption/decryption process -------------------------- for symbol in text: if symbol in abc: # 1st plugboard -------------------------- if symbol in plugboard: lowerCAmelCase = plugboard[symbol] # rotor ra -------------------------- lowerCAmelCase = abc.index(snake_case__ ) + rotorposa lowerCAmelCase = rotora[index % len(snake_case__ )] # rotor rb -------------------------- lowerCAmelCase = abc.index(snake_case__ ) + rotorposa lowerCAmelCase = rotora[index % len(snake_case__ )] # rotor rc -------------------------- lowerCAmelCase = abc.index(snake_case__ ) + rotorposa lowerCAmelCase = rotora[index % len(snake_case__ )] # reflector -------------------------- # this is the reason you don't need another machine to decipher lowerCAmelCase = reflector[symbol] # 2nd rotors lowerCAmelCase = abc[rotora.index(snake_case__ ) - rotorposa] lowerCAmelCase = abc[rotora.index(snake_case__ ) - rotorposa] lowerCAmelCase = abc[rotora.index(snake_case__ ) - rotorposa] # 2nd plugboard if symbol in plugboard: lowerCAmelCase = plugboard[symbol] # moves/resets rotor positions rotorposa += 1 if rotorposa >= len(snake_case__ ): lowerCAmelCase = 0 rotorposa += 1 if rotorposa >= len(snake_case__ ): lowerCAmelCase = 0 rotorposa += 1 if rotorposa >= len(snake_case__ ): lowerCAmelCase = 0 # else: # pass # Error could be also raised # raise ValueError( # 'Invalid symbol('+repr(symbol)+')') result.append(snake_case__ ) return "".join(snake_case__ ) if __name__ == "__main__": lowercase__ : Any = '''This is my Python script that emulates the Enigma machine from WWII.''' lowercase__ : str = (1, 1, 1) lowercase__ : Any = '''pictures''' lowercase__ : Tuple = (rotora, rotora, rotora) lowercase__ : List[Any] = enigma(message, rotor_pos, rotor_sel, pb) print('''Encrypted message:''', en) print('''Decrypted message:''', enigma(en, rotor_pos, rotor_sel, pb))
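# --- Note as code: the reflector table above is an involution (A<->N, B<->O, ...),
# which is why enigma() with identical settings both encrypts and decrypts, as the
# __main__ block demonstrates. A minimal standalone check on the first pairs:
reflector = {"A": "N", "N": "A", "B": "O", "O": "B"}
assert all(reflector[reflector[c]] == c for c in reflector)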
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1

        # outer and hole widths must share parity for a uniform border
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f'{solution() = }')
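# --- Worked check of the counting identity above: a square lamina with outer side o and
# a centred square hole of side h uses o*o - h*h tiles, and o and h must share parity so
# the border has uniform width. 32 tiles admits exactly two laminae: (o, h) = (6, 2) and (9, 7).
for o, h in [(6, 2), (9, 7)]:
    assert o * o - h * h == 32 and (o - h) % 2 == 0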
def xnor_gate(input_1: int, input_2: int) -> int:
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
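# --- Equivalent formulation: XNOR is the negation of XOR, so the gate above can be
# cross-checked against Python's bitwise operator on {0, 1} inputs.
for a in (0, 1):
    for b in (0, 1):
        assert xnor_gate(a, b) == 1 - (a ^ b)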
import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> Union[str, Any]: assert isinstance(snake_case__ , snake_case__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Union[str, Any]: lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''text''': '''string'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_text_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize( '''features''' , [ None, {'''text''': '''string'''}, {'''text''': '''int32'''}, {'''text''': '''float32'''}, ] , ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]: lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''text''': '''string'''} lowerCAmelCase = features.copy() if features else default_expected_features lowerCAmelCase = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase = TextDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read() _check_text_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> List[str]: lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''text''': '''string'''} lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read() _check_text_dataset(snake_case__ , snake_case__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]: if issubclass(snake_case__ , snake_case__ ): lowerCAmelCase = text_path elif issubclass(snake_case__ , snake_case__ ): lowerCAmelCase = [text_path] lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''text''': '''string'''} lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_text_dataset(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__=("train",) ) -> Optional[Any]: assert isinstance(snake_case__ , snake_case__ ) for split in splits: lowerCAmelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]: lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''text''': '''string'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase = TextDatasetReader({'''train''': 
text_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_text_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize( '''features''' , [ None, {'''text''': '''string'''}, {'''text''': '''int32'''}, {'''text''': '''float32'''}, ] , ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> List[Any]: lowerCAmelCase = tmp_path / '''cache''' # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" lowerCAmelCase = {'''text''': '''string'''} lowerCAmelCase = features.copy() if features else default_expected_features lowerCAmelCase = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase = TextDatasetReader({'''train''': text_path} , features=snake_case__ , cache_dir=snake_case__ ).read() _check_text_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Any: if split: lowerCAmelCase = {split: text_path} else: lowerCAmelCase = '''train''' lowerCAmelCase = {'''train''': text_path, '''test''': text_path} lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''text''': '''string'''} lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_text_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
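# --- Hedged usage sketch: the reader exercised above backs `load_dataset("text", ...)`,
# which yields a single "text" column with one row per line. The file path is hypothetical.
from datasets import load_dataset

ds = load_dataset("text", data_files={"train": "my_corpus.txt"})  # hypothetical file
print(ds["train"].column_names)  # ['text']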
import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 lowercase__ : Union[str, Any] = sys.version_info >= (3, 1_0) def SCREAMING_SNAKE_CASE_ ( snake_case__=None , snake_case__=None ) -> Any: return field(default_factory=lambda: default , metadata=snake_case__ ) @dataclass class lowercase_ : """simple docstring""" UpperCAmelCase_ : int UpperCAmelCase_ : float UpperCAmelCase_ : str UpperCAmelCase_ : bool @dataclass class lowercase_ : """simple docstring""" UpperCAmelCase_ : int = 42 UpperCAmelCase_ : str = field(default="""toto""" , metadata={"""help""": """help message"""} ) @dataclass class lowercase_ : """simple docstring""" UpperCAmelCase_ : bool = False UpperCAmelCase_ : bool = True UpperCAmelCase_ : Optional[bool] = None class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = """titi""" UpperCAmelCase_ : int = """toto""" class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : List[str] = """titi""" UpperCAmelCase_ : int = """toto""" UpperCAmelCase_ : Dict = 42 @dataclass class lowercase_ : """simple docstring""" UpperCAmelCase_ : BasicEnum = "toto" def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: lowerCAmelCase = BasicEnum(self.foo ) @dataclass class lowercase_ : """simple docstring""" UpperCAmelCase_ : MixedTypeEnum = "toto" def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: lowerCAmelCase = MixedTypeEnum(self.foo ) @dataclass class lowercase_ : """simple docstring""" UpperCAmelCase_ : Optional[int] = None UpperCAmelCase_ : Optional[float] = field(default=UpperCamelCase_ , metadata={"""help""": """help message"""} ) UpperCAmelCase_ : Optional[str] = None UpperCAmelCase_ : Optional[List[str]] = list_field(default=[] ) UpperCAmelCase_ : Optional[List[int]] = list_field(default=[] ) @dataclass class lowercase_ : """simple docstring""" UpperCAmelCase_ : List[int] = list_field(default=[] ) UpperCAmelCase_ : List[int] = list_field(default=[1, 2, 3] ) UpperCAmelCase_ : List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] ) UpperCAmelCase_ : List[float] = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class lowercase_ : """simple docstring""" UpperCAmelCase_ : List[int] = field() UpperCAmelCase_ : str = field() UpperCAmelCase_ : BasicEnum = field() def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: lowerCAmelCase = BasicEnum(self.required_enum ) @dataclass class lowercase_ : """simple docstring""" UpperCAmelCase_ : int UpperCAmelCase_ : "BasicEnum" = field() UpperCAmelCase_ : "Optional[bool]" = None UpperCAmelCase_ : "str" = field(default="""toto""" , metadata={"""help""": """help message"""} ) UpperCAmelCase_ : "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] ) if is_python_no_less_than_3_10: @dataclass class lowercase_ : """simple docstring""" UpperCAmelCase_ : bool = False UpperCAmelCase_ : bool = True UpperCAmelCase_ : bool | None = None @dataclass class lowercase_ : """simple docstring""" UpperCAmelCase_ : int | None = None UpperCAmelCase_ : float | None = field(default=UpperCamelCase_ , metadata={"""help""": """help 
message"""} ) UpperCAmelCase_ : str | None = None UpperCAmelCase_ : list[str] | None = list_field(default=[] ) UpperCAmelCase_ : list[int] | None = list_field(default=[] ) class lowercase_ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->int: self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): lowerCAmelCase = {k: v for k, v in vars(__SCREAMING_SNAKE_CASE ).items() if k != '''container'''} lowerCAmelCase = {k: v for k, v in vars(__SCREAMING_SNAKE_CASE ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , __SCREAMING_SNAKE_CASE ) and yy.get('''choices''' , __SCREAMING_SNAKE_CASE ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](__SCREAMING_SNAKE_CASE ) , yy['''type'''](__SCREAMING_SNAKE_CASE ) ) del xx["type"], yy["type"] self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: lowerCAmelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--bar''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--baz''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--flag''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , const=__SCREAMING_SNAKE_CASE , nargs='''?''' ) self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((lowerCAmelCase) , ) = parser.parse_args_into_dataclasses(__SCREAMING_SNAKE_CASE , look_for_args_file=__SCREAMING_SNAKE_CASE ) self.assertFalse(example.flag ) def SCREAMING_SNAKE_CASE_ ( self ) ->Any: lowerCAmelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=42 , type=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--baz''' , default='''toto''' , type=__SCREAMING_SNAKE_CASE , help='''help message''' ) self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: lowerCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , const=__SCREAMING_SNAKE_CASE , nargs='''?''' ) expected.add_argument('''--baz''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , const=__SCREAMING_SNAKE_CASE , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=__SCREAMING_SNAKE_CASE , dest='''baz''' ) expected.add_argument('''--opt''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(__SCREAMING_SNAKE_CASE ) for dataclass_type in dataclass_types: lowerCAmelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE ) self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = parser.parse_args([] ) self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , 
baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) ) lowerCAmelCase = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) ) lowerCAmelCase = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) ) lowerCAmelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) ) lowerCAmelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: lowerCAmelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) lowerCAmelCase = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) lowerCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) lowerCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) lowerCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) lowerCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: @dataclass class lowercase_ : """simple docstring""" UpperCAmelCase_ : Literal["titi", "toto", 42] = "toto" lowerCAmelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) lowerCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) lowerCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: lowerCAmelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=__SCREAMING_SNAKE_CASE ) self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = parser.parse_args([] ) self.assertEqual( 
__SCREAMING_SNAKE_CASE , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) lowerCAmelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: lowerCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--bar''' , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help='''help message''' ) expected.add_argument('''--baz''' , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(__SCREAMING_SNAKE_CASE ) for dataclass_type in dataclass_types: lowerCAmelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE ) self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = parser.parse_args([] ) self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , bar=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , ces=[] , des=[] ) ) lowerCAmelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=12 , bar=3.1_4 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: lowerCAmelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--required_str''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__SCREAMING_SNAKE_CASE , ) self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: lowerCAmelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__SCREAMING_SNAKE_CASE , ) expected.add_argument('''--opt''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--baz''' , default='''toto''' , type=__SCREAMING_SNAKE_CASE , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__SCREAMING_SNAKE_CASE ) self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: lowerCAmelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = { '''foo''': 12, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } lowerCAmelCase = parser.parse_dict(__SCREAMING_SNAKE_CASE )[0] lowerCAmelCase = 
BasicExample(**__SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: lowerCAmelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = { '''foo''': 12, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, '''extra''': 42, } self.assertRaises(__SCREAMING_SNAKE_CASE , parser.parse_dict , __SCREAMING_SNAKE_CASE , allow_extra_keys=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->int: lowerCAmelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = { '''foo''': 12, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE , '''temp_json''' ) os.mkdir(__SCREAMING_SNAKE_CASE ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0] lowerCAmelCase = BasicExample(**__SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: lowerCAmelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = { '''foo''': 12, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE , '''temp_yaml''' ) os.mkdir(__SCREAMING_SNAKE_CASE ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] lowerCAmelCase = BasicExample(**__SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: lowerCAmelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
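# Hedged usage sketch of the HfArgumentParser behaviour the tests above
# exercise: dataclass fields become CLI flags, and typed lists map to
# nargs="+". `DemoArgs` is a hypothetical dataclass introduced for this demo.
from dataclasses import dataclass, field
from typing import List

from transformers import HfArgumentParser


@dataclass
class DemoArgs:
    foo: int = 42
    bar: float = 3.14
    names: List[str] = field(default_factory=lambda: ["a", "b"])


(demo_args,) = HfArgumentParser(DemoArgs).parse_args_into_dataclasses(
    ["--foo", "7", "--names", "x", "y"], look_for_args_file=False
)
print(demo_args)  # DemoArgs(foo=7, bar=3.14, names=['x', 'y'])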
338
def decimal_to_binary(num: int) -> str:
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
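# Quick sanity check for the converter above; for these illustrative values
# the output should agree with Python's built-in bin().
assert decimal_to_binary(10) == "0b1010" == bin(10)
assert decimal_to_binary(-37) == "-0b100101" == bin(-37)
assert decimal_to_binary(0) == "0b0" == bin(0)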
338
1
from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class lowercase_ ( nn.Module ): """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE = 16 , __SCREAMING_SNAKE_CASE = 88 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = 32 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "geglu" , __SCREAMING_SNAKE_CASE = None , ) ->int: super().__init__() lowerCAmelCase = nn.ModuleList( [ TransformeraDModel( num_attention_heads=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , in_channels=__SCREAMING_SNAKE_CASE , num_layers=__SCREAMING_SNAKE_CASE , dropout=__SCREAMING_SNAKE_CASE , norm_num_groups=__SCREAMING_SNAKE_CASE , cross_attention_dim=__SCREAMING_SNAKE_CASE , attention_bias=__SCREAMING_SNAKE_CASE , sample_size=__SCREAMING_SNAKE_CASE , num_vector_embeds=__SCREAMING_SNAKE_CASE , activation_fn=__SCREAMING_SNAKE_CASE , num_embeds_ada_norm=__SCREAMING_SNAKE_CASE , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference lowerCAmelCase = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` lowerCAmelCase = [77, 257] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` lowerCAmelCase = [1, 0] def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = True , ) ->Tuple: lowerCAmelCase = hidden_states lowerCAmelCase = [] lowerCAmelCase = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens lowerCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] lowerCAmelCase = self.transformer_index_for_condition[i] lowerCAmelCase = self.transformers[transformer_index]( __SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , timestep=__SCREAMING_SNAKE_CASE , cross_attention_kwargs=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] lowerCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) lowerCAmelCase = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=__SCREAMING_SNAKE_CASE )
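# Hedged sketch of how the dual-transformer forward pass above carves up the
# condition sequence: the 77/257 split and the [1, 0] routing mirror the
# defaults set in __init__; the tensors and sizes here are illustrative only.
import torch

batch_size, num_features = 2, 768
encoder_hidden_states = torch.randn(batch_size, 77 + 257, num_features)
condition_lengths = [77, 257]
transformer_index_for_condition = [1, 0]

tokens_start = 0
for i in range(2):
    condition = encoder_hidden_states[:, tokens_start : tokens_start + condition_lengths[i]]
    # transformers[transformer_index_for_condition[i]] would consume `condition` here
    print(i, "-> transformer", transformer_index_for_condition[i], condition.shape)
    tokens_start += condition_lengths[i]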
338
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
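# Hedged usage sketch for the greedy selector above, reusing the de-obfuscated
# names Things / build_menu / greedy; the menu values are illustrative.
food = ["Burger", "Pizza", "Coca Cola", "Rice"]
value = [80, 100, 60, 70]
weight = [40, 10, 20, 70]
menu = build_menu(food, value, weight)
chosen, total_value = greedy(menu, 60, Things.get_value)
print(chosen, total_value)  # picks the highest-value items that fit under cost 60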
338
1
from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch lowercase__ : Dict = logging.get_logger(__name__) @add_end_docstrings( UpperCamelCase_ , r""" top_k (`int`, defaults to 5): The number of predictions to return. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). """ , ) class lowercase_ ( UpperCamelCase_ ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->np.ndarray: if self.framework == "tf": lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": lowerCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE ) else: raise ValueError('''Unsupported framework''' ) return masked_index def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->np.ndarray: lowerCAmelCase = self.get_masked_index(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( '''fill-mask''' , self.model.base_model_prefix , F"No mask_token ({self.tokenizer.mask_token}) found on the input" , ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->str: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->Dict[str, GenericTensor]: if return_tensors is None: lowerCAmelCase = self.framework lowerCAmelCase = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE ) self.ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE ) return model_inputs def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Tuple: lowerCAmelCase = self.model(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = model_inputs['''input_ids'''] return model_outputs def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=None ) ->str: # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: lowerCAmelCase = target_ids.shape[0] lowerCAmelCase = model_outputs['''input_ids'''][0] lowerCAmelCase = model_outputs['''logits'''] if self.framework == "tf": lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] lowerCAmelCase = outputs.numpy() lowerCAmelCase = outputs[0, masked_index, :] lowerCAmelCase = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 ) if target_ids is not None: lowerCAmelCase = tf.gather_nd(tf.squeeze(__SCREAMING_SNAKE_CASE , 0 ) , target_ids.reshape(-1 , 1 ) ) lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE , 0 ) lowerCAmelCase = tf.math.top_k(__SCREAMING_SNAKE_CASE , k=__SCREAMING_SNAKE_CASE ) lowerCAmelCase , lowerCAmelCase = topk.values.numpy(), topk.indices.numpy() else: lowerCAmelCase = torch.nonzero(input_ids == 
self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample lowerCAmelCase = outputs[0, masked_index, :] lowerCAmelCase = logits.softmax(dim=-1 ) if target_ids is not None: lowerCAmelCase = probs[..., target_ids] lowerCAmelCase , lowerCAmelCase = probs.topk(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = [] lowerCAmelCase = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): lowerCAmelCase = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this array in place lowerCAmelCase = input_ids.numpy().copy() if target_ids is not None: lowerCAmelCase = target_ids[p].tolist() lowerCAmelCase = p # Filter padding out: lowerCAmelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back lowerCAmelCase = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence} row.append(__SCREAMING_SNAKE_CASE ) result.append(__SCREAMING_SNAKE_CASE ) if single_mask: return result[0] return result def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): lowerCAmelCase = [targets] try: lowerCAmelCase = self.tokenizer.get_vocab() except Exception: lowerCAmelCase = {} lowerCAmelCase = [] for target in targets: lowerCAmelCase = vocab.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if id_ is None: lowerCAmelCase = self.tokenizer( __SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , max_length=1 , truncation=__SCREAMING_SNAKE_CASE , )['''input_ids'''] if len(__SCREAMING_SNAKE_CASE ) == 0: logger.warning( F"The specified target token `{target}` does not exist in the model vocabulary. " '''We cannot replace it with anything meaningful, ignoring it''' ) continue lowerCAmelCase = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( F"The specified target token `{target}` does not exist in the model vocabulary. " F"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." 
) target_ids.append(id_ ) lowerCAmelCase = list(set(__SCREAMING_SNAKE_CASE ) ) if len(__SCREAMING_SNAKE_CASE ) == 0: raise ValueError('''At least one target must be provided when passed.''' ) lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE ) return target_ids def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ) ->Dict: lowerCAmelCase = {} if targets is not None: lowerCAmelCase = self.get_target_ids(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = target_ids if top_k is not None: lowerCAmelCase = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( '''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' ) return {}, {}, postprocess_params def __call__( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->List[Any]: lowerCAmelCase = super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) == 1: return outputs[0] return outputs
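# Hedged usage sketch: exercising the fill-mask pipeline defined above through
# the public transformers entry point; model choice and outputs illustrative.
from transformers import pipeline

unmasker = pipeline("fill-mask", model="distilroberta-base")
for pred in unmasker("The capital of France is <mask>.", top_k=3):
    print(f"{pred['token_str']!r:>10}  score={pred['score']:.3f}")

# The `targets` kwarg routes through the target-lookup method above to
# restrict scoring to explicit candidate tokens:
print(unmasker("The capital of France is <mask>.", targets=[" Paris", " Lyon"]))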
338
import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () lowercase__ : Dict = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). lowercase__ : Optional[int] = [0, 2_5, 5_0] lowercase__ : Union[str, Any] = [2_5, 5_0, 7_5] lowercase__ : int = fuzz.membership.trimf(X, abca) lowercase__ : Tuple = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. lowercase__ : List[str] = np.ones(7_5) lowercase__ : Any = np.zeros((7_5,)) # 1. Union = max(µA(x), µB(x)) lowercase__ : Union[str, Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) lowercase__ : int = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) lowercase__ : Union[str, Any] = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) lowercase__ : Optional[int] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] lowercase__ : Any = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) lowercase__ : str = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] lowercase__ : Tuple = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] lowercase__ : Tuple = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title('''Young''') plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title('''Middle aged''') plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title('''union''') plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title('''intersection''') plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title('''complement_a''') plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title('''difference a/b''') plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title('''alg_sum''') plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title('''alg_product''') plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title('''bdd_sum''') plt.grid(True) plt.subplot(4, 3, 1_0) plt.plot(X, bdd_difference) plt.title('''bdd_difference''') plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
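# Hedged mini-demo of the core fuzzy-set operations plotted above, written in
# plain numpy so it runs without scikit-fuzzy: union is an elementwise max,
# intersection an elementwise min, and complement is 1 - mu.
import numpy as np

mu_a = np.array([0.0, 0.3, 0.8, 1.0])
mu_b = np.array([0.5, 0.4, 0.2, 0.0])
print(np.maximum(mu_a, mu_b))  # union:        [0.5 0.4 0.8 1. ]
print(np.minimum(mu_a, mu_b))  # intersection: [0.  0.3 0.2 0. ]
print(1 - mu_a)                # complement:   [1.  0.7 0.2 0. ]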
338
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowercase__ : Dict = { '''configuration_efficientformer''': [ '''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''EfficientFormerConfig''', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : List[Any] = ['''EfficientFormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : str = [ '''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''EfficientFormerForImageClassification''', '''EfficientFormerForImageClassificationWithTeacher''', '''EfficientFormerModel''', '''EfficientFormerPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : str = [ '''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFEfficientFormerForImageClassification''', '''TFEfficientFormerForImageClassificationWithTeacher''', '''TFEfficientFormerModel''', '''TFEfficientFormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys lowercase__ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
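# Hedged sketch of the mechanism _LazyModule relies on: attribute access on
# the module triggers the real import, so importing the package stays cheap
# until a symbol is used. This is a simplified stand-in, not the actual
# transformers implementation.
import importlib
import types


class DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")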
338
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : str = (DDPMScheduler,) def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->Optional[Any]: lowerCAmelCase = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**__SCREAMING_SNAKE_CASE ) return config def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ): self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: for clip_sample in [True, False]: self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Any: self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: for t in [0, 500, 999]: self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5 def SCREAMING_SNAKE_CASE_ ( self ) ->str: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = len(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter lowerCAmelCase = torch.manual_seed(0 ) for t in reversed(range(__SCREAMING_SNAKE_CASE ) ): # 1. predict noise residual lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # 2. 
predict previous mean of sample x_t-1 lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCAmelCase = pred_prev_sample lowerCAmelCase = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) ) lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2 assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3 def SCREAMING_SNAKE_CASE_ ( self ) ->str: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' ) lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = len(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter lowerCAmelCase = torch.manual_seed(0 ) for t in reversed(range(__SCREAMING_SNAKE_CASE ) ): # 1. predict noise residual lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # 2. predict previous mean of sample x_t-1 lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCAmelCase = pred_prev_sample lowerCAmelCase = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) ) lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2 assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3 def SCREAMING_SNAKE_CASE_ ( self ) ->Any: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler.timesteps for i, timestep in enumerate(__SCREAMING_SNAKE_CASE ): if i == len(__SCREAMING_SNAKE_CASE ) - 1: lowerCAmelCase = -1 else: lowerCAmelCase = timesteps[i + 1] lowerCAmelCase = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = prev_t.item() self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = [100, 87, 50, 51, 0] with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->str: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = [100, 87, 50, 1, 0] lowerCAmelCase = len(__SCREAMING_SNAKE_CASE ) with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Any: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = [scheduler.config.num_train_timesteps] 
with self.assertRaises( __SCREAMING_SNAKE_CASE , msg=F"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ): scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
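# Hedged sketch of the custom-timesteps API the tests above exercise, against
# the public diffusers DDPMScheduler; shapes are illustrative.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])
print(scheduler.timesteps)  # tensor([100,  87,  50,   1,   0])

sample = torch.randn(1, 3, 8, 8)
noise_pred = torch.randn(1, 3, 8, 8)  # stand-in for a UNet prediction
sample = scheduler.step(noise_pred, 100, sample, generator=torch.manual_seed(0)).prev_sample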
338
1
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class lowercase_ : """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=19 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.0_2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , ) ->Union[str, Any]: lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_input_mask lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope def SCREAMING_SNAKE_CASE_ ( self ) ->Any: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: lowerCAmelCase = EsmConfig( vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__SCREAMING_SNAKE_CASE , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , ) return config def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Tuple: lowerCAmelCase = EsmForProteinFolding(config=__SCREAMING_SNAKE_CASE ).float() 
model.to(__SCREAMING_SNAKE_CASE ) model.eval() lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = model(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) ) def SCREAMING_SNAKE_CASE_ ( self ) ->int: lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowercase_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = False UpperCAmelCase_ : Dict = (EsmForProteinFolding,) if is_torch_available() else () UpperCAmelCase_ : List[Any] = () UpperCAmelCase_ : Tuple = {} if is_torch_available() else {} UpperCAmelCase_ : List[str] = False def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: lowerCAmelCase = EsmFoldModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self ) ->Any: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) @unittest.skip('''Does not support attention outputs''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: pass @unittest.skip def SCREAMING_SNAKE_CASE_ ( self ) ->Any: pass @unittest.skip('''Esm does not support embedding resizing''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: pass @unittest.skip('''Esm does not support embedding resizing''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->str: pass @unittest.skip('''ESMFold does not support passing input embeds!''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->str: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: pass @unittest.skip('''ESMFold does not output hidden states in the normal way.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: pass @unittest.skip('''ESMfold does not output hidden states in the normal way.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: pass @unittest.skip('''ESMFold only has one output format.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: pass @unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: pass @unittest.skip('''ESMFold does not support input chunking.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: pass @unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def 
SCREAMING_SNAKE_CASE_ ( self ) ->str: pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Any: pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->str: pass @unittest.skip('''ESMFold doesn\'t support data parallel.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->str: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: pass @require_torch class lowercase_ ( UpperCamelCase_ ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE_ ( self ) ->str: lowerCAmelCase = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float() model.eval() lowerCAmelCase = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )['''positions'''] lowerCAmelCase = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
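# Hedged sketch of end-to-end ESMFold inference via the public API, mirroring
# the slow test above but starting from a sequence string; the checkpoint is
# large, so treat this as illustrative.
from transformers import AutoTokenizer, EsmForProteinFolding

tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float().eval()

inputs = tokenizer(["MKTVRQERLKSIVRILERSKEPVSGAQ"], return_tensors="pt", add_special_tokens=False)
positions = model(**inputs)["positions"]
print(positions.shape)  # (num_recycles, batch, seq_len, 14, 3), as asserted above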
338
import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer lowercase__ : str = logging.get_logger(__name__) class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : Any = """AutoTokenizer""" UpperCAmelCase_ : Optional[int] = ["""tokenizer"""] UpperCAmelCase_ : str = { """semantic_prompt""": 1, """coarse_prompt""": 2, """fine_prompt""": 2, } def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]: super().__init__(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = speaker_embeddings @classmethod def SCREAMING_SNAKE_CASE_ ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , **__SCREAMING_SNAKE_CASE ) ->Tuple: if speaker_embeddings_dict_path is not None: lowerCAmelCase = get_file_from_repo( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ) , revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ) , ) if speaker_embeddings_path is None: logger.warning( F"`{os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}` does not exist\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`." 
) lowerCAmelCase = None else: with open(__SCREAMING_SNAKE_CASE ) as speaker_embeddings_json: lowerCAmelCase = json.load(__SCREAMING_SNAKE_CASE ) else: lowerCAmelCase = None lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) return cls(tokenizer=__SCREAMING_SNAKE_CASE , speaker_embeddings=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , __SCREAMING_SNAKE_CASE="speaker_embeddings" , __SCREAMING_SNAKE_CASE = False , **__SCREAMING_SNAKE_CASE , ) ->int: if self.speaker_embeddings is not None: os.makedirs(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''v2''' ) , exist_ok=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = {} lowerCAmelCase = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict['''repo_or_path'''] , __SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=__SCREAMING_SNAKE_CASE , ) lowerCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}.npy" ) lowerCAmelCase = tmp_dict with open(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , '''w''' ) as fp: json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) super().save_pretrained(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE ) ->List[str]: lowerCAmelCase = self.speaker_embeddings[voice_preset] lowerCAmelCase = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." ) lowerCAmelCase = get_file_from_repo( self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ) , revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ) , ) if path is None: raise ValueError( F"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." ) lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE ) return voice_preset_dict def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None ) ->Tuple: for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F"Voice preset unrecognized, missing {key} as a key." ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." 
) def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="pt" , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ) ->int: if voice_preset is not None and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): if ( isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE ) else: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not voice_preset.endswith('''.npz''' ): lowerCAmelCase = voice_preset + '''.npz''' lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE ) if voice_preset is not None: self._validate_voice_preset_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) lowerCAmelCase = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.tokenizer( __SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) if voice_preset is not None: lowerCAmelCase = voice_preset return encoded_text
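# Hedged usage sketch for the processor above; it corresponds to the public
# BarkProcessor API in transformers (checkpoint and preset are illustrative).
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("suno/bark-small")
inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
print(inputs["input_ids"].shape)  # padded to max_length=256 by default above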
338
1
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration lowercase__ : Any = [ # tf -> hf ('''/''', '''.'''), ('''layer_''', '''layers.'''), ('''kernel''', '''weight'''), ('''beta''', '''bias'''), ('''gamma''', '''weight'''), ('''pegasus''', '''model'''), ] lowercase__ : Dict = [ ('''.output.dense''', '''.fc2'''), ('''intermediate.LayerNorm''', '''final_layer_norm'''), ('''intermediate.dense''', '''fc1'''), ] lowercase__ : Optional[int] = ( INIT_COMMON + [ ('''attention.self.LayerNorm''', '''self_attn_layer_norm'''), ('''attention.output.dense''', '''self_attn.out_proj'''), ('''attention.self''', '''self_attn'''), ('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''), ('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''), ('''attention.encdec''', '''encoder_attn'''), ('''key''', '''k_proj'''), ('''value''', '''v_proj'''), ('''query''', '''q_proj'''), ('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''), ] + END_COMMON ) lowercase__ : List[Any] = ( INIT_COMMON + [ ('''embeddings.word_embeddings''', '''shared.weight'''), ('''embeddings.position_embeddings''', '''embed_positions.weight'''), ('''attention.self.LayerNorm''', '''self_attn_layer_norm'''), ('''attention.output.dense''', '''self_attn.output'''), ('''attention.self''', '''self_attn.self'''), ('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''), ] + END_COMMON ) lowercase__ : List[Any] = [ '''encdec/key/bias''', '''encdec/query/bias''', '''encdec/value/bias''', '''self/key/bias''', '''self/query/bias''', '''self/value/bias''', '''encdec_output/dense/bias''', '''attention/output/dense/bias''', ] def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> Optional[int]: for tf_name, hf_name in patterns: lowerCAmelCase = k.replace(snake_case__ , snake_case__ ) return k def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> BigBirdPegasusForConditionalGeneration: lowerCAmelCase = BigBirdPegasusConfig(**snake_case__ ) lowerCAmelCase = BigBirdPegasusForConditionalGeneration(snake_case__ ) lowerCAmelCase = torch_model.state_dict() lowerCAmelCase = {} # separating decoder weights lowerCAmelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )} lowerCAmelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )} for k, v in tqdm(decoder_weights.items() , '''tf -> hf conversion''' ): lowerCAmelCase = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE] if any(snake_case__ ): continue lowerCAmelCase = DECODER_PATTERNS lowerCAmelCase = rename_state_dict_key(snake_case__ , snake_case__ ) if new_k not in state_dict: raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): lowerCAmelCase = v.T lowerCAmelCase = torch.from_numpy(snake_case__ ) assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}" for k, v in tqdm(remaining_weights.items() , '''tf -> hf conversion''' ): lowerCAmelCase = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE] if any(snake_case__ ): continue lowerCAmelCase = REMAINING_PATTERNS lowerCAmelCase = rename_state_dict_key(snake_case__ , snake_case__ ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(f"could not find new key {new_k} in state dict. 
(converted from {k})" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): lowerCAmelCase = v.T lowerCAmelCase = torch.from_numpy(snake_case__ ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}" lowerCAmelCase = mapping['''model.embed_positions.weight'''] lowerCAmelCase = mapping.pop('''model.embed_positions.weight''' ) lowerCAmelCase , lowerCAmelCase = torch_model.load_state_dict(snake_case__ , strict=snake_case__ ) lowerCAmelCase = [ k for k in missing if k not in [ '''final_logits_bias''', '''model.encoder.embed_tokens.weight''', '''model.decoder.embed_tokens.weight''', '''lm_head.weight''', ] ] assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}" assert extra == [], f"no matches found for the following tf keys {extra}" return torch_model def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Dict: lowerCAmelCase = tf.train.list_variables(snake_case__ ) lowerCAmelCase = {} lowerCAmelCase = ['''global_step'''] for name, shape in tqdm(snake_case__ , desc='''converting tf checkpoint to dict''' ): lowerCAmelCase = any(pat in name for pat in ignore_name ) if skip_key: continue lowerCAmelCase = tf.train.load_variable(snake_case__ , snake_case__ ) lowerCAmelCase = array return tf_weights def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> int: lowerCAmelCase = get_tf_weights_as_numpy(snake_case__ ) lowerCAmelCase = convert_bigbird_pegasus(snake_case__ , snake_case__ ) torch_model.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase__ : Optional[Any] = argparse.ArgumentParser() parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''') parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''') lowercase__ : Tuple = parser.parse_args() lowercase__ : int = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
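# Hedged mini-trace of the key-renaming helper (bound as rename_state_dict_key
# at its call sites above), inlined here on one illustrative TF variable name.
k = "pegasus/decoder/layer_0/attention/self/query/kernel"
for tf_name, hf_name in DECODER_PATTERNS:
    k = k.replace(tf_name, hf_name)
print(k)  # model.decoder.layers.0.self_attn.q_proj.weight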
338
import warnings from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401 warnings.warn( '''The `inpainting.py` script is outdated. Please use directly `from diffusers import''' ''' StableDiffusionInpaintPipeline` instead.''' )
338
1
import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm lowercase__ : int = re.compile('''[^A-Za-z_0-9]''') # parameters used in DuplicationIndex lowercase__ : Tuple = 1_0 lowercase__ : Tuple = 2_5_6 def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[MinHash]: if len(snake_case__ ) < MIN_NUM_TOKENS: return None lowerCAmelCase = MinHash(num_perm=snake_case__ ) for token in set(snake_case__ ): min_hash.update(token.encode() ) return min_hash def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Set[str]: return {t for t in NON_ALPHA.split(snake_case__ ) if len(t.strip() ) > 0} class lowercase_ : """simple docstring""" def __init__( self , *, __SCREAMING_SNAKE_CASE = 0.8_5 , ) ->Any: lowerCAmelCase = duplication_jaccard_threshold lowerCAmelCase = NUM_PERM lowerCAmelCase = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) lowerCAmelCase = defaultdict(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->None: lowerCAmelCase = self._index.query(__SCREAMING_SNAKE_CASE ) if code_key in self._index.keys: print(F"Duplicate key {code_key}" ) return self._index.insert(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(__SCREAMING_SNAKE_CASE ) break else: self._duplicate_clusters[close_duplicates[0]].add(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[List[Dict]]: lowerCAmelCase = [] for base, duplicates in self._duplicate_clusters.items(): lowerCAmelCase = [base] + list(__SCREAMING_SNAKE_CASE ) # reformat the cluster to be a list of dict lowerCAmelCase = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster] duplicate_clusters.append(__SCREAMING_SNAKE_CASE ) return duplicate_clusters def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->None: lowerCAmelCase = self.get_duplicate_clusters() with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f: json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> int: lowerCAmelCase , lowerCAmelCase = element lowerCAmelCase = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> int: with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(snake_case__ , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ): if data is not None: yield data def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> int: lowerCAmelCase = DuplicationIndex(duplication_jaccard_threshold=snake_case__ ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(snake_case__ ) ) , max_queue_size=1_0_0 ) ): di.add(snake_case__ , snake_case__ ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> float: lowerCAmelCase = get_tokens(snake_case__ ) lowerCAmelCase = get_tokens(snake_case__ ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) lowercase__ : int = None def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> Union[str, Any]: lowerCAmelCase = [] for elementa in cluster: lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content'''] for elementa in extremes: lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content'''] if jaccard_similarity(snake_case__ , snake_case__ ) >= jaccard_threshold: elementa["copies"] += 1 break else: lowerCAmelCase = 1 extremes.append(snake_case__ ) return extremes def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> List[str]: global _shared_dataset lowerCAmelCase = dataset lowerCAmelCase = [] lowerCAmelCase = partial(_find_cluster_extremes_shared , jaccard_threshold=snake_case__ ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( snake_case__ , snake_case__ , ) , total=len(snake_case__ ) , ): extremes_list.append(snake_case__ ) return extremes_list def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]: lowerCAmelCase = make_duplicate_clusters(snake_case__ , snake_case__ ) lowerCAmelCase = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster} lowerCAmelCase = {} lowerCAmelCase = find_extremes(snake_case__ , snake_case__ , snake_case__ ) for extremes in extremes_clusters: for element in extremes: lowerCAmelCase = element lowerCAmelCase = duplicate_indices - set(extreme_dict.keys() ) lowerCAmelCase = dataset.filter(lambda snake_case__ , snake_case__ : idx not in remove_indices , with_indices=snake_case__ ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: lowerCAmelCase = element['''base_index'''] in extreme_dict if element["is_extreme"]: lowerCAmelCase = extreme_dict[element['''base_index''']]['''copies'''] print(f"Original dataset size: {len(snake_case__ )}" ) print(f"Number of duplicate clusters: {len(snake_case__ )}" ) print(f"Files in duplicate cluster: {len(snake_case__ )}" ) print(f"Unique files in duplicate cluster: {len(snake_case__ )}" ) print(f"Filtered dataset size: {len(snake_case__ )}" ) return ds_filter, duplicate_clusters
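# Hedged mini-demo of the token-level Jaccard similarity the extremes filter
# above relies on, with the tokenisation inlined (NON_ALPHA_DEMO mirrors the
# NON_ALPHA regex at the top of this file).
import re

NON_ALPHA_DEMO = re.compile(r"[^A-Za-z_0-9]")


def demo_tokens(code: str) -> set:
    return {t for t in NON_ALPHA_DEMO.split(code) if t.strip()}


tokens_a = demo_tokens("def add(x, y):\n    return x + y")
tokens_b = demo_tokens("def add(a, b):\n    return a + b")
print(len(tokens_a & tokens_b) / len(tokens_a | tokens_b))  # Jaccard in [0, 1]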
338
import os import re import shutil import sys import tempfile import unittest import black lowercase__ : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated. lowercase__ : Dict = ''' def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states ''' class lowercase_ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: lowerCAmelCase = tempfile.mkdtemp() os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) ) lowerCAmelCase = self.transformer_dir shutil.copy( os.path.join(__SCREAMING_SNAKE_CASE , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: lowerCAmelCase = '''src/transformers''' shutil.rmtree(self.transformer_dir ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Union[str, Any]: lowerCAmelCase = comment + F"\nclass {class_name}(nn.Module):\n" + class_code if overwrite_result is not None: lowerCAmelCase = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result lowerCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) lowerCAmelCase = black.format_str(__SCREAMING_SNAKE_CASE , mode=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = os.path.join(self.transformer_dir , '''new_code.py''' ) with open(__SCREAMING_SNAKE_CASE , '''w''' , newline='''\n''' ) as f: f.write(__SCREAMING_SNAKE_CASE ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(__SCREAMING_SNAKE_CASE ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=__SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f: self.assertTrue(f.read() , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->int: lowerCAmelCase = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: # Base copy consistency self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , __SCREAMING_SNAKE_CASE , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , 
'''TestModel''' , __SCREAMING_SNAKE_CASE ) , ) # Copy consistency with a really long name lowerCAmelCase = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( F"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , F"{long_class_name}LMPredictionHead" , re.sub('''Bert''' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , __SCREAMING_SNAKE_CASE , overwrite_result=re.sub('''Bert''' , '''TestModel''' , __SCREAMING_SNAKE_CASE ) , ) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: lowerCAmelCase = check_copies.LOCALIZED_READMES['''README_zh-hans.md'''] lowerCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the''' ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for''' ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong''' ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.''' ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),''' ''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and''' ''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same''' ''' method has been applied to compress GPT2 into''' ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into''' ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),''' ''' Multilingual BERT into''' ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German''' ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**''' ''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders''' ''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang''' ''' Luong, Quoc V. Le, Christopher D. Manning.''' ) lowerCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) lowerCAmelCase = ( '''1. 
**[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.''' ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文''' ''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and''' ''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same''' ''' method has been applied to compress GPT2 into''' ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into''' ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),''' ''' Multilingual BERT into''' ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German''' ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自''' ''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather''' ''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,''' ''' Christopher D. Manning 发布。\n''' ) lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] ) self.assertFalse(__SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] ) # Check whether the number of models is equal to README.md after conversion. self.assertTrue(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the''' ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for''' ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong''' ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.''' ) lowerCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and''' ''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) lowerCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] ) # Check if the model link is synchronized. self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
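# A minimal sketch (separate from the test file above) of the convention that
# check_copies enforces: a class marked "# Copied from ..." must stay identical
# to its source, modulo the declared renames. All names here are illustrative.
class OriginalHead:
    def forward(self, hidden_states):
        return hidden_states * 2


# Copied from this_module.OriginalHead with Original->Mirror
class MirrorHead:
    def forward(self, hidden_states):
        return hidden_states * 2  # any drift from OriginalHead fails the check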
import math_equivalence  # From: git+https://github.com/hendrycks/math.git

import datasets


_CITATION = """\
@article{hendrycksmath2021,
    title={Measuring Mathematical Problem Solving With the MATH Dataset},
    author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt},
    journal={arXiv preprint arXiv:2103.03874},
    year={2021}
}
"""

_DESCRIPTION = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
"""

_KWARGS_DESCRIPTION = r"""
Calculates accuracy after canonicalizing inputs.

Args:
    predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTeX.
    references: list of references for each prediction. Each
        reference is a string that contains natural language and LaTeX.
Returns:
    accuracy: accuracy after canonicalizing inputs
        (e.g., converting "1/2" to "\\frac{1}{2}")

Examples:
    >>> metric = datasets.load_metric("competition_math")
    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    >>> print(results)
    {'accuracy': 1.0}
"""


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
from sklearn.metrics import mean_squared_error import datasets lowercase__ : List[str] = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' lowercase__ : Optional[int] = '''\ Mean Squared Error(MSE) is the average of the square of difference between the predicted and actual values. ''' lowercase__ : int = ''' Args: predictions: array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. references: array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. sample_weight: array-like of shape (n_samples,), default=None Sample weights. multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average" Defines aggregating of multiple output values. Array-like value defines weights used to average errors. "raw_values" : Returns a full set of errors in case of multioutput input. "uniform_average" : Errors of all outputs are averaged with uniform weight. squared : bool, default=True If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value. Returns: mse : mean squared error. Examples: >>> mse_metric = datasets.load_metric("mse") >>> predictions = [2.5, 0.0, 2, 8] >>> references = [3, -0.5, 2, 7] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.375} >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False) >>> print(rmse_result) {\'mse\': 0.6123724356957945} If you\'re using multi-dimensional lists, then set the config as follows : >>> mse_metric = datasets.load_metric("mse", "multilist") >>> predictions = [[0.5, 1], [-1, 1], [7, -6]] >>> references = [[0, 2], [-1, 2], [8, -5]] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.7083333333333334} >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\') >>> print(results) # doctest: +NORMALIZE_WHITESPACE {\'mse\': array([0.41666667, 1. 
])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ ( datasets.Metric ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html''' ] , ) def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('''float''' ) ), "references": datasets.Sequence(datasets.Value('''float''' ) ), } else: return { "predictions": datasets.Value('''float''' ), "references": datasets.Value('''float''' ), } def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="uniform_average" , __SCREAMING_SNAKE_CASE=True ) ->Union[str, Any]: lowerCAmelCase = mean_squared_error( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , sample_weight=__SCREAMING_SNAKE_CASE , multioutput=__SCREAMING_SNAKE_CASE , squared=__SCREAMING_SNAKE_CASE ) return {"mse": mse}
import unittest

import numpy as np


def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None) -> np.ndarray:
    """Schur complement S = C - B^T A^{-1} B of the block A in [[A, B], [B^T, C]]."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        raise ValueError(
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )

    if shape_b[1] != shape_c[1]:
        raise ValueError(
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        # passing B in the A slot makes the block shapes inconsistent
        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
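# Numeric check of the identity the first test relies on: for a block matrix
# M = [[A, B], [B^T, C]] with invertible A, det(M) = det(A) * det(S), where
# S is the Schur complement computed above. The small example values are arbitrary.
import numpy as np

a = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([[1.0], [2.0]])
c = np.array([[5.0]])
s = c - b.T @ np.linalg.inv(a) @ b
m = np.block([[a, b], [b.T, c]])
print(np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s)))  # True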
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowercase__ : List[str] = logging.get_logger(__name__) lowercase__ : Union[str, Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowercase__ : Dict = { '''vocab_file''': { '''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''', '''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''', '''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''', '''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''', '''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''', }, '''merges_file''': { '''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''', '''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''', '''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''', '''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''', '''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''', '''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''', '''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''', '''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''', '''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''', }, } lowercase__ : List[Any] = { '''gpt2''': 1_0_2_4, '''gpt2-medium''': 1_0_2_4, '''gpt2-large''': 1_0_2_4, '''gpt2-xl''': 1_0_2_4, '''distilgpt2''': 1_0_2_4, } class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : List[str] = VOCAB_FILES_NAMES UpperCAmelCase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ : Any = ["""input_ids""", """attention_mask"""] UpperCAmelCase_ : Tuple = GPTaTokenizer def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<|endoftext|>" , __SCREAMING_SNAKE_CASE="<|endoftext|>" , __SCREAMING_SNAKE_CASE="<|endoftext|>" , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ) ->List[str]: super().__init__( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) lowerCAmelCase = kwargs.pop('''add_bos_token''' , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space: lowerCAmelCase = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) ) lowerCAmelCase = add_prefix_space lowerCAmelCase = pre_tok_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = add_prefix_space def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->BatchEncoding: 
lowerCAmelCase = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE ) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->BatchEncoding: lowerCAmelCase = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE ) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->Tuple[str]: lowerCAmelCase = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE ) return tuple(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->List[int]: lowerCAmelCase = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) + [self.eos_token_id] ) if len(__SCREAMING_SNAKE_CASE ) > self.model_max_length: lowerCAmelCase = input_ids[-self.model_max_length :] return input_ids
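# Usage sketch for the fast tokenizer above (requires the `transformers`
# package and access to the "gpt2" checkpoint on the Hub):
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
enc = tokenizer("Hello world")
print(enc["input_ids"])                    # e.g. [15496, 995]
print(tokenizer.decode(enc["input_ids"]))  # "Hello world"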
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration lowercase__ : Any = { '''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''', '''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''', '''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''', '''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''', '''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''', '''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''', '''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''', '''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''', '''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''', '''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''', } def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str: lowerCAmelCase = ['''layers''', '''blocks'''] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) lowercase__ : List[Any] = { '''blocks''': '''layers''', '''mlp.0''': '''fc1''', '''mlp.2''': '''fc2''', '''mlp_ln''': '''final_layer_norm''', '''.attn.query''': '''.self_attn.q_proj''', '''.attn.key''': '''.self_attn.k_proj''', '''.attn.value''': '''.self_attn.v_proj''', '''.attn_ln''': '''.self_attn_layer_norm''', '''.attn.out''': '''.self_attn.out_proj''', '''.cross_attn.query''': '''.encoder_attn.q_proj''', '''.cross_attn.key''': '''.encoder_attn.k_proj''', '''.cross_attn.value''': '''.encoder_attn.v_proj''', '''.cross_attn_ln''': '''.encoder_attn_layer_norm''', '''.cross_attn.out''': '''.encoder_attn.out_proj''', '''decoder.ln.''': '''decoder.layer_norm.''', '''encoder.ln.''': '''encoder.layer_norm.''', '''token_embedding''': '''embed_tokens''', '''encoder.positional_embedding''': '''encoder.embed_positions.weight''', '''decoder.positional_embedding''': '''decoder.embed_positions.weight''', '''ln_post''': '''layer_norm''', } def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]: lowerCAmelCase = list(s_dict.keys() ) for key in keys: lowerCAmelCase = key for k, v in WHISPER_MAPPING.items(): if k in key: lowerCAmelCase = new_key.replace(snake_case__ , snake_case__ ) print(f"{key} -> {new_key}" ) lowerCAmelCase = s_dict.pop(snake_case__ ) return s_dict def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]: lowerCAmelCase , lowerCAmelCase = emb.weight.shape lowerCAmelCase = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) lowerCAmelCase = emb.weight.data return lin_layer def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> bytes: os.makedirs(snake_case__ , exist_ok=snake_case__ ) 
lowerCAmelCase = os.path.basename(snake_case__ ) lowerCAmelCase = url.split('''/''' )[-2] lowerCAmelCase = os.path.join(snake_case__ , snake_case__ ) if os.path.exists(snake_case__ ) and not os.path.isfile(snake_case__ ): raise RuntimeError(f"{download_target} exists and is not a regular file" ) if os.path.isfile(snake_case__ ): lowerCAmelCase = open(snake_case__ , '''rb''' ).read() if hashlib.shaaaa(snake_case__ ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" ) with urllib.request.urlopen(snake_case__ ) as source, open(snake_case__ , '''wb''' ) as output: with tqdm( total=int(source.info().get('''Content-Length''' ) ) , ncols=8_0 , unit='''iB''' , unit_scale=snake_case__ , unit_divisor=1_0_2_4 ) as loop: while True: lowerCAmelCase = source.read(8_1_9_2 ) if not buffer: break output.write(snake_case__ ) loop.update(len(snake_case__ ) ) lowerCAmelCase = open(snake_case__ , '''rb''' ).read() if hashlib.shaaaa(snake_case__ ).hexdigest() != expected_shaaaa: raise RuntimeError( '''Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.''' ) return model_bytes def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> str: if ".pt" not in checkpoint_path: lowerCAmelCase = _download(_MODELS[checkpoint_path] ) else: lowerCAmelCase = torch.load(snake_case__ , map_location='''cpu''' ) lowerCAmelCase = original_checkpoint['''dims'''] lowerCAmelCase = original_checkpoint['''model_state_dict'''] lowerCAmelCase = state_dict['''decoder.token_embedding.weight'''] remove_ignore_keys_(snake_case__ ) rename_keys(snake_case__ ) lowerCAmelCase = True lowerCAmelCase = state_dict['''decoder.layers.0.fc1.weight'''].shape[0] lowerCAmelCase = WhisperConfig( vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=snake_case__ , decoder_ffn_dim=snake_case__ , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_state'''] , max_source_positions=dimensions['''n_audio_ctx'''] , ) lowerCAmelCase = WhisperForConditionalGeneration(snake_case__ ) lowerCAmelCase , lowerCAmelCase = model.model.load_state_dict(snake_case__ , strict=snake_case__ ) if len(snake_case__ ) > 0 and not set(snake_case__ ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,''' f" but all the following weights are missing {missing}" ) if tie_embeds: lowerCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens ) else: lowerCAmelCase = proj_out_weights model.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase__ : List[str] = argparse.ArgumentParser() # # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') lowercase__ : int = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
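# Example invocation of the conversion script above (the filename is assumed
# from the conversion function's name; the output path is illustrative).
# Passing a key from the _MODELS table downloads the original checkpoint and
# verifies its SHA256; passing a local ".pt" path skips the download.
#
#   python convert_openai_whisper_to_tfms.py \
#       --checkpoint_path tiny \
#       --pytorch_dump_folder_path ./whisper-tiny-hf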
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowercase__ : Tuple = logging.get_logger(__name__) lowercase__ : Tuple = { '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''', } class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : Tuple = """deta""" UpperCAmelCase_ : Dict = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=900 , __SCREAMING_SNAKE_CASE=2048 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=2048 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=1024 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0_2 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="sine" , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=300 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.2_5 , **__SCREAMING_SNAKE_CASE , ) ->List[str]: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowerCAmelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] ) else: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): lowerCAmelCase = backbone_config.pop('''model_type''' ) lowerCAmelCase = CONFIG_MAPPING[backbone_model_type] lowerCAmelCase = config_class.from_dict(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = backbone_config lowerCAmelCase = num_queries lowerCAmelCase = max_position_embeddings lowerCAmelCase = d_model lowerCAmelCase = encoder_ffn_dim lowerCAmelCase = encoder_layers lowerCAmelCase = encoder_attention_heads lowerCAmelCase = decoder_ffn_dim lowerCAmelCase = decoder_layers lowerCAmelCase = decoder_attention_heads lowerCAmelCase = dropout lowerCAmelCase = attention_dropout lowerCAmelCase = activation_dropout lowerCAmelCase = activation_function lowerCAmelCase = init_std lowerCAmelCase = init_xavier_std lowerCAmelCase = encoder_layerdrop lowerCAmelCase = auxiliary_loss lowerCAmelCase = position_embedding_type # deformable attributes lowerCAmelCase = num_feature_levels lowerCAmelCase = encoder_n_points lowerCAmelCase = decoder_n_points lowerCAmelCase = two_stage lowerCAmelCase = two_stage_num_proposals lowerCAmelCase = with_box_refine lowerCAmelCase = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher lowerCAmelCase = class_cost lowerCAmelCase = bbox_cost lowerCAmelCase = giou_cost # Loss coefficients lowerCAmelCase = mask_loss_coefficient lowerCAmelCase = dice_loss_coefficient lowerCAmelCase = bbox_loss_coefficient lowerCAmelCase = giou_loss_coefficient lowerCAmelCase = eos_coefficient lowerCAmelCase = focal_alpha super().__init__(is_encoder_decoder=__SCREAMING_SNAKE_CASE , 
**__SCREAMING_SNAKE_CASE ) @property def SCREAMING_SNAKE_CASE_ ( self ) ->int: return self.encoder_attention_heads @property def SCREAMING_SNAKE_CASE_ ( self ) ->int: return self.d_model def SCREAMING_SNAKE_CASE_ ( self ) ->str: lowerCAmelCase = copy.deepcopy(self.__dict__ ) lowerCAmelCase = self.backbone_config.to_dict() lowerCAmelCase = self.__class__.model_type return output
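# Minimal usage sketch for the configuration above (assumes a `transformers`
# version that ships DETA; the model is randomly initialized, no weights fetched):
from transformers import DetaConfig, DetaModel

config = DetaConfig()              # default ResNet backbone, 900 queries
model = DetaModel(config)
print(config.num_attention_heads)  # alias for encoder_attention_heads -> 8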
from ...processing_utils import ProcessorMixin


class TvltProcessor(ProcessorMixin):
    r"""
    Constructs a TVLT processor which wraps a TVLT image processor and a TVLT feature extractor into a single
    processor.
    """

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
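# Usage sketch for the processor above; the 44100 Hz rate matches the TVLT
# feature extractor's default, and the random audio is just a stand-in.
import numpy as np

from transformers import TvltFeatureExtractor, TvltImageProcessor, TvltProcessor

processor = TvltProcessor(TvltImageProcessor(), TvltFeatureExtractor())
audio = np.random.randn(44100).astype(np.float32)  # one second of fake mono audio
inputs = processor(audio=audio, sampling_rate=44100)
print(inputs.keys())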
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
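# Usage sketch: instantiate the config and inspect the derived down_ops table.
config = LevitConfig()
print(config.hidden_sizes)  # [128, 256, 384]
print(config.down_ops[0])   # ['Subsample', 16, 8, 4, 2, 2]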
def selection_sort(collection: list) -> list:
    """Selection sort: repeatedly pick the smallest remaining element and swap
    it into its final position."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
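# Quick checks for the function above; selection sort does O(n^2) comparisons
# but only O(1) extra space and at most n - 1 swaps.
assert selection_sort([5, 2, 9, 1]) == [1, 2, 5, 9]
assert selection_sort([]) == []
assert selection_sort([-3, 0, -3]) == [-3, -3, 0]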
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # inserting in descending order at the head yields an ascending list
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class lowercase_ : """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=19 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.0_2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , ) ->Union[str, Any]: lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_input_mask lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope def SCREAMING_SNAKE_CASE_ ( self ) ->Any: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: lowerCAmelCase = EsmConfig( vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__SCREAMING_SNAKE_CASE , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , ) return config def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Tuple: lowerCAmelCase = EsmForProteinFolding(config=__SCREAMING_SNAKE_CASE ).float() 
model.to(__SCREAMING_SNAKE_CASE ) model.eval() lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = model(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) ) def SCREAMING_SNAKE_CASE_ ( self ) ->int: lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowercase_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = False UpperCAmelCase_ : Dict = (EsmForProteinFolding,) if is_torch_available() else () UpperCAmelCase_ : List[Any] = () UpperCAmelCase_ : Tuple = {} if is_torch_available() else {} UpperCAmelCase_ : List[str] = False def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: lowerCAmelCase = EsmFoldModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self ) ->Any: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) @unittest.skip('''Does not support attention outputs''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: pass @unittest.skip def SCREAMING_SNAKE_CASE_ ( self ) ->Any: pass @unittest.skip('''Esm does not support embedding resizing''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: pass @unittest.skip('''Esm does not support embedding resizing''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->str: pass @unittest.skip('''ESMFold does not support passing input embeds!''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->str: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: pass @unittest.skip('''ESMFold does not output hidden states in the normal way.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: pass @unittest.skip('''ESMfold does not output hidden states in the normal way.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: pass @unittest.skip('''ESMFold only has one output format.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: pass @unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: pass @unittest.skip('''ESMFold does not support input chunking.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: pass @unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def 
SCREAMING_SNAKE_CASE_ ( self ) ->str: pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Any: pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->str: pass @unittest.skip('''ESMFold doesn\'t support data parallel.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->str: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: pass @require_torch class lowercase_ ( UpperCamelCase_ ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE_ ( self ) ->str: lowerCAmelCase = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float() model.eval() lowerCAmelCase = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )['''positions'''] lowerCAmelCase = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
def perfect(number: int) -> bool:
    """Return True if `number` equals the sum of its proper positive divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
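# The predicate above recovers the classical perfect numbers; 6 = 1 + 2 + 3
# and 28 = 1 + 2 + 4 + 7 + 14 are the smallest two.
print([n for n in range(2, 500) if perfect(n)])  # [6, 28, 496]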
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : List[str] = ["""image_processor""", """tokenizer"""] UpperCAmelCase_ : int = """OwlViTImageProcessor""" UpperCAmelCase_ : Any = ("""CLIPTokenizer""", """CLIPTokenizerFast""") def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->Any: lowerCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __SCREAMING_SNAKE_CASE , ) lowerCAmelCase = kwargs.pop('''feature_extractor''' ) lowerCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="max_length" , __SCREAMING_SNAKE_CASE="np" , **__SCREAMING_SNAKE_CASE ) ->int: if text is None and query_images is None and images is None: raise ValueError( '''You have to specify at least one text or query image or image. All three cannot be none.''' ) if text is not None: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or (isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not isinstance(text[0] , __SCREAMING_SNAKE_CASE )): lowerCAmelCase = [self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )] elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(text[0] , __SCREAMING_SNAKE_CASE ): lowerCAmelCase = [] # Maximum number of queries across batch lowerCAmelCase = max([len(__SCREAMING_SNAKE_CASE ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__SCREAMING_SNAKE_CASE ) != max_num_queries: lowerCAmelCase = t + [''' '''] * (max_num_queries - len(__SCREAMING_SNAKE_CASE )) lowerCAmelCase = self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) encodings.append(__SCREAMING_SNAKE_CASE ) else: raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' ) if return_tensors == "np": lowerCAmelCase = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) lowerCAmelCase = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp lowerCAmelCase = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) lowerCAmelCase = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch lowerCAmelCase = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 ) lowerCAmelCase = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf 
lowerCAmelCase = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) lowerCAmelCase = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) else: raise ValueError('''Target return tensor type could not be returned''' ) lowerCAmelCase = BatchEncoding() lowerCAmelCase = input_ids lowerCAmelCase = attention_mask if query_images is not None: lowerCAmelCase = BatchEncoding() lowerCAmelCase = self.image_processor( __SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).pixel_values lowerCAmelCase = query_pixel_values if images is not None: lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if text is not None and images is not None: lowerCAmelCase = image_features.pixel_values return encoding elif query_images is not None and images is not None: lowerCAmelCase = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__SCREAMING_SNAKE_CASE ) , tensor_type=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Optional[int]: return self.image_processor.post_process(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Any: return self.image_processor.post_process_object_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Tuple: return self.image_processor.post_process_image_guided_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->str: return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->List[Any]: return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) @property def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __SCREAMING_SNAKE_CASE , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE_ ( self ) ->int: warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __SCREAMING_SNAKE_CASE , ) return self.image_processor
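# Usage sketch for the processor above (the checkpoint name is real but the
# blank test image is a stand-in; requires `transformers` and `Pillow`):
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.new("RGB", (768, 768))
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
print(inputs["input_ids"].shape, inputs["pixel_values"].shape)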
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of a number, rounded to `digit_amount` digits
    when `digit_amount` is positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowercase__ : List[Any] = logging.get_logger(__name__) lowercase__ : Optional[Any] = {'''vocab_file''': '''spiece.model'''} lowercase__ : Optional[int] = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', } } lowercase__ : Any = { '''albert-base-v1''': 5_1_2, '''albert-large-v1''': 5_1_2, '''albert-xlarge-v1''': 5_1_2, '''albert-xxlarge-v1''': 5_1_2, '''albert-base-v2''': 5_1_2, '''albert-large-v2''': 5_1_2, '''albert-xlarge-v2''': 5_1_2, '''albert-xxlarge-v2''': 5_1_2, } lowercase__ : Tuple = '''▁''' class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : Dict = VOCAB_FILES_NAMES UpperCAmelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) ->None: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
lowerCAmelCase = ( AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE , normalized=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token ) lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) lowerCAmelCase = do_lower_case lowerCAmelCase = remove_space lowerCAmelCase = keep_accents lowerCAmelCase = vocab_file lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__SCREAMING_SNAKE_CASE ) @property def SCREAMING_SNAKE_CASE_ ( self ) ->Any: return len(self.sp_model ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: lowerCAmelCase = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) ->int: lowerCAmelCase = self.__dict__.copy() lowerCAmelCase = None return state def __setstate__( self , __SCREAMING_SNAKE_CASE ) ->Tuple: lowerCAmelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowerCAmelCase = {} lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Any: if self.remove_space: lowerCAmelCase = ''' '''.join(inputs.strip().split() ) else: lowerCAmelCase = inputs lowerCAmelCase = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' ) if not self.keep_accents: lowerCAmelCase = unicodedata.normalize('''NFKD''' , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = ''''''.join([c for c in outputs if not unicodedata.combining(__SCREAMING_SNAKE_CASE )] ) if self.do_lower_case: lowerCAmelCase = outputs.lower() return outputs def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->List[str]: lowerCAmelCase = self.preprocess_text(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = [] for piece in pieces: if len(__SCREAMING_SNAKE_CASE ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit(): lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(__SCREAMING_SNAKE_CASE , '''''' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowerCAmelCase = cur_pieces[1:] else: lowerCAmelCase = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__SCREAMING_SNAKE_CASE ) else: new_pieces.append(__SCREAMING_SNAKE_CASE ) return new_pieces def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->int: return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->int: return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Optional[int]: lowerCAmelCase = [] lowerCAmelCase = '''''' lowerCAmelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " 
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token lowerCAmelCase = True lowerCAmelCase = [] else: current_sub_tokens.append(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = False out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) return out_string.strip() def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->List[int]: lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ) ->List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE ) if token_ids_a is not None: return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->List[int]: lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->Tuple[str]: if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return lowerCAmelCase = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi: lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
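# --- Illustrative usage sketch (added; not part of the original file). Assuming
# the class above is the ALBERT SentencePiece tokenizer that transformers exports
# as `AlbertTokenizer`, a minimal round trip looks like this (needs the
# `sentencepiece` package and network access for the pretrained vocab):
#
#     from transformers import AlbertTokenizer
#
#     tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#     pieces = tokenizer.tokenize("SentencePiece splits words into subword pieces.")
#     ids = tokenizer.convert_tokens_to_ids(pieces)
#     print(pieces)                 # subword pieces, prefixed with "▁"
#     print(tokenizer.decode(ids))  # lower-cased, accent-stripped text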
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class lowercase_ :
    """simple docstring"""

    UpperCAmelCase_ : Optional[Union[str, Path]] = None
    UpperCAmelCase_ : bool = False
    UpperCAmelCase_ : bool = False
    UpperCAmelCase_ : bool = False
    UpperCAmelCase_ : Optional[Dict] = None
    UpperCAmelCase_ : Optional[str] = None
    UpperCAmelCase_ : bool = False
    UpperCAmelCase_ : bool = False
    UpperCAmelCase_ : bool = False
    UpperCAmelCase_ : bool = True
    UpperCAmelCase_ : Optional[int] = None
    UpperCAmelCase_ : int = 1
    UpperCAmelCase_ : Optional[Union[str, bool]] = None
    UpperCAmelCase_ : bool = False
    UpperCAmelCase_ : Optional[Dict] = None
    UpperCAmelCase_ : Optional[str] = None

    def SCREAMING_SNAKE_CASE_ ( self ) ->"DownloadConfig":
        # deep-copy every field value (`v`) so the clone shares no mutable state
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
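# --- Illustrative usage (added). Assuming the dataclass above is datasets'
# `DownloadConfig` and the method above is its `copy()` helper, the deep copy
# means mutating a clone's dict-valued field (here assumed to be `proxies`)
# leaves the original configuration untouched:
#
#     cfg = DownloadConfig(proxies={"https": "proxy:3128"})
#     clone = cfg.copy()
#     clone.proxies["https"] = "other:3128"
#     assert cfg.proxies["https"] == "proxy:3128"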
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = (DEISMultistepScheduler,) UpperCAmelCase_ : int = (("""num_inference_steps""", 25),) def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->str: lowerCAmelCase = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''solver_order''': 2, } config.update(**__SCREAMING_SNAKE_CASE ) return config def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ) ->Tuple: lowerCAmelCase = dict(self.forward_default_kwargs ) lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.dummy_sample lowerCAmelCase = 0.1 * sample lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) # copy over dummy past residuals lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE ) new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) # copy over dummy past residuals lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] lowerCAmelCase , lowerCAmelCase = sample, sample for t in range(__SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ): lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample lowerCAmelCase = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: pass def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ) ->List[Any]: lowerCAmelCase = dict(self.forward_default_kwargs ) lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.dummy_sample lowerCAmelCase = 0.1 * sample lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) # copy over dummy past residuals (must be after setting timesteps) lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE ) # copy over dummy past residuals new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) # copy over dummy past residual (must be after setting timesteps) lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample 
lowerCAmelCase = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->List[Any]: if scheduler is None: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = 10 lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample return sample def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: lowerCAmelCase = dict(self.forward_default_kwargs ) lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE ) for scheduler_class in self.scheduler_classes: lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.dummy_sample lowerCAmelCase = 0.1 * sample if num_inference_steps is not None and hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ): scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) elif num_inference_steps is not None and not hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ): lowerCAmelCase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] lowerCAmelCase = scheduler.timesteps[5] lowerCAmelCase = scheduler.timesteps[6] lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE_ ( self ) ->int: # make sure that iterating over schedulers with same config names gives same results # for defaults lowerCAmelCase = DEISMultistepScheduler(**self.get_scheduler_config() ) lowerCAmelCase = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) lowerCAmelCase = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( 
self ) ->int: self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , algorithm_type='''deis''' , solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , ) lowerCAmelCase = self.full_loop( solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , ) assert not torch.isnan(__SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers" def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE ) self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=__SCREAMING_SNAKE_CASE , time_step=0 ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: lowerCAmelCase = self.full_loop() lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: lowerCAmelCase = self.full_loop(prediction_type='''v_prediction''' ) lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3 def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config(thresholding=__SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 ) lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = 10 lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample assert sample.dtype == torch.floataa
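# --- Illustrative usage sketch (added). It mirrors what the tests above
# exercise: configure a `diffusers.DEISMultistepScheduler` and run a denoising
# loop, with random tensors standing in for a real UNet's noise prediction:
#
#     import torch
#     from diffusers import DEISMultistepScheduler
#
#     scheduler = DEISMultistepScheduler(num_train_timesteps=1000, solver_order=2)
#     scheduler.set_timesteps(10)
#     sample = torch.randn(1, 3, 8, 8)
#     for t in scheduler.timesteps:
#         model_output = torch.randn(1, 3, 8, 8)  # placeholder for unet(sample, t)
#         sample = scheduler.step(model_output, t, sample).prev_sample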
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING lowercase__ : List[str] = logging.get_logger(__name__) lowercase__ : Optional[int] = { '''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''', } class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : Any = """instructblip_vision_model""" def __init__( self , __SCREAMING_SNAKE_CASE=1408 , __SCREAMING_SNAKE_CASE=6144 , __SCREAMING_SNAKE_CASE=39 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=224 , __SCREAMING_SNAKE_CASE=14 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=1e-6 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=1e-10 , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ) ->Tuple: super().__init__(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = hidden_size lowerCAmelCase = intermediate_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = patch_size lowerCAmelCase = image_size lowerCAmelCase = initializer_range lowerCAmelCase = attention_dropout lowerCAmelCase = layer_norm_eps lowerCAmelCase = hidden_act lowerCAmelCase = qkv_bias @classmethod def SCREAMING_SNAKE_CASE_ ( cls , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->"PretrainedConfig": cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE ) lowerCAmelCase , lowerCAmelCase = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get('''model_type''' ) == "instructblip": lowerCAmelCase = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"You are using a model of type {config_dict['model_type']} to instantiate a model of type " F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : int = """instructblip_qformer""" def __init__( self , __SCREAMING_SNAKE_CASE=30522 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=0.0_2 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE="absolute" , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=1408 , **__SCREAMING_SNAKE_CASE , ) ->Optional[int]: super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps lowerCAmelCase = position_embedding_type lowerCAmelCase = cross_attention_frequency lowerCAmelCase = encoder_hidden_size @classmethod def SCREAMING_SNAKE_CASE_ ( cls , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->"PretrainedConfig": cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE ) lowerCAmelCase , lowerCAmelCase = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get('''model_type''' ) == "instructblip": lowerCAmelCase = config_dict['''qformer_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"You are using a model of type {config_dict['model_type']} to instantiate a model of type " F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : str = """instructblip""" UpperCAmelCase_ : Dict = True def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=32 , **__SCREAMING_SNAKE_CASE ) ->str: super().__init__(**__SCREAMING_SNAKE_CASE ) if vision_config is None: lowerCAmelCase = {} logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''' ) if qformer_config is None: lowerCAmelCase = {} logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''' ) if text_config is None: lowerCAmelCase = {} logger.info('''text_config is None. 
Initializing the text config with default values (`OPTConfig`).''' ) lowerCAmelCase = InstructBlipVisionConfig(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = InstructBlipQFormerConfig(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = text_config['''model_type'''] if '''model_type''' in text_config else '''opt''' lowerCAmelCase = CONFIG_MAPPING[text_model_type](**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.text_config.tie_word_embeddings lowerCAmelCase = self.text_config.is_encoder_decoder lowerCAmelCase = num_query_tokens lowerCAmelCase = self.vision_config.hidden_size lowerCAmelCase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES lowerCAmelCase = 1.0 lowerCAmelCase = 0.0_2 @classmethod def SCREAMING_SNAKE_CASE_ ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) ->Optional[Any]: return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__SCREAMING_SNAKE_CASE , ) def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: lowerCAmelCase = copy.deepcopy(self.__dict__ ) lowerCAmelCase = self.vision_config.to_dict() lowerCAmelCase = self.qformer_config.to_dict() lowerCAmelCase = self.text_config.to_dict() lowerCAmelCase = self.__class__.model_type return output
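# --- Illustrative usage sketch (added; hypothetical export names). Assuming the
# three classes above are exported as `InstructBlipVisionConfig`,
# `InstructBlipQFormerConfig` and `InstructBlipConfig`, a composite config can
# be built from default sub-configs:
#
#     from transformers import (
#         InstructBlipConfig,
#         InstructBlipQFormerConfig,
#         InstructBlipVisionConfig,
#     )
#
#     config = InstructBlipConfig(
#         vision_config=InstructBlipVisionConfig().to_dict(),
#         qformer_config=InstructBlipQFormerConfig().to_dict(),
#     )  # leaving text_config unset falls back to a default OPT config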
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowercase_ ( unittest.TestCase ): """simple docstring""" @property def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: torch.manual_seed(0 ) lowerCAmelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def SCREAMING_SNAKE_CASE_ ( self ) ->int: lowerCAmelCase = self.dummy_uncond_unet lowerCAmelCase = KarrasVeScheduler() lowerCAmelCase = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' , return_dict=__SCREAMING_SNAKE_CASE )[0] lowerCAmelCase = image[0, -3:, -3:, -1] lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCAmelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class lowercase_ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: lowerCAmelCase = '''google/ncsnpp-celebahq-256''' lowerCAmelCase = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = KarrasVeScheduler() lowerCAmelCase = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe(num_inference_steps=20 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) lowerCAmelCase = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class lowercase_ ( unittest.TestCase ): """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.0_2 , __SCREAMING_SNAKE_CASE=4 , ) ->str: lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_attention_mask lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_choices def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_attention_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None if self.use_token_type_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: lowerCAmelCase = self.prepare_config_and_inputs() lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs lowerCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class lowercase_ ( UpperCamelCase_ , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : str = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) 
if is_flax_available() else () ) def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: lowerCAmelCase = FlaxAlbertModelTester(self ) @slow def SCREAMING_SNAKE_CASE_ ( self ) ->int: for model_class_name in self.all_model_classes: lowerCAmelCase = model_class_name.from_pretrained('''albert-base-v2''' ) lowerCAmelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @require_flax class lowercase_ ( unittest.TestCase ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE_ ( self ) ->Any: lowerCAmelCase = FlaxAlbertModel.from_pretrained('''albert-base-v2''' ) lowerCAmelCase = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowerCAmelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )[0] lowerCAmelCase = (1, 11, 768) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = np.array( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch lowercase__ : Dict = logging.get_logger(__name__) @add_end_docstrings( UpperCamelCase_ , r""" top_k (`int`, defaults to 5): The number of predictions to return. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). """ , ) class lowercase_ ( UpperCamelCase_ ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->np.ndarray: if self.framework == "tf": lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": lowerCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE ) else: raise ValueError('''Unsupported framework''' ) return masked_index def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->np.ndarray: lowerCAmelCase = self.get_masked_index(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( '''fill-mask''' , self.model.base_model_prefix , F"No mask_token ({self.tokenizer.mask_token}) found on the input" , ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->str: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->Dict[str, GenericTensor]: if return_tensors is None: lowerCAmelCase = self.framework lowerCAmelCase = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE ) self.ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE ) return model_inputs def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Tuple: lowerCAmelCase = self.model(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = model_inputs['''input_ids'''] return model_outputs def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=None ) ->str: # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: lowerCAmelCase = target_ids.shape[0] lowerCAmelCase = model_outputs['''input_ids'''][0] lowerCAmelCase = model_outputs['''logits'''] if self.framework == "tf": lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] lowerCAmelCase = outputs.numpy() lowerCAmelCase = outputs[0, masked_index, :] lowerCAmelCase = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 ) if target_ids is not None: lowerCAmelCase = tf.gather_nd(tf.squeeze(__SCREAMING_SNAKE_CASE , 0 ) , target_ids.reshape(-1 , 1 ) ) lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE , 0 ) lowerCAmelCase = tf.math.top_k(__SCREAMING_SNAKE_CASE , k=__SCREAMING_SNAKE_CASE ) lowerCAmelCase , lowerCAmelCase = topk.values.numpy(), topk.indices.numpy() else: lowerCAmelCase = torch.nonzero(input_ids == 
self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample lowerCAmelCase = outputs[0, masked_index, :] lowerCAmelCase = logits.softmax(dim=-1 ) if target_ids is not None: lowerCAmelCase = probs[..., target_ids] lowerCAmelCase , lowerCAmelCase = probs.topk(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = [] lowerCAmelCase = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): lowerCAmelCase = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this array in place lowerCAmelCase = input_ids.numpy().copy() if target_ids is not None: lowerCAmelCase = target_ids[p].tolist() lowerCAmelCase = p # Filter padding out: lowerCAmelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back lowerCAmelCase = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence} row.append(__SCREAMING_SNAKE_CASE ) result.append(__SCREAMING_SNAKE_CASE ) if single_mask: return result[0] return result def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): lowerCAmelCase = [targets] try: lowerCAmelCase = self.tokenizer.get_vocab() except Exception: lowerCAmelCase = {} lowerCAmelCase = [] for target in targets: lowerCAmelCase = vocab.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if id_ is None: lowerCAmelCase = self.tokenizer( __SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , max_length=1 , truncation=__SCREAMING_SNAKE_CASE , )['''input_ids'''] if len(__SCREAMING_SNAKE_CASE ) == 0: logger.warning( F"The specified target token `{target}` does not exist in the model vocabulary. " '''We cannot replace it with anything meaningful, ignoring it''' ) continue lowerCAmelCase = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( F"The specified target token `{target}` does not exist in the model vocabulary. " F"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." 
) target_ids.append(id_ ) lowerCAmelCase = list(set(__SCREAMING_SNAKE_CASE ) ) if len(__SCREAMING_SNAKE_CASE ) == 0: raise ValueError('''At least one target must be provided when passed.''' ) lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE ) return target_ids def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ) ->Dict: lowerCAmelCase = {} if targets is not None: lowerCAmelCase = self.get_target_ids(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = target_ids if top_k is not None: lowerCAmelCase = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( '''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' ) return {}, {}, postprocess_params def __call__( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->List[Any]: lowerCAmelCase = super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) == 1: return outputs[0] return outputs
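# --- Illustrative usage sketch (added). The pipeline implemented above backs
# the high-level `transformers.pipeline("fill-mask")` entry point:
#
#     from transformers import pipeline
#
#     unmasker = pipeline("fill-mask", model="distilroberta-base")
#     print(unmasker("The capital of France is <mask>.", top_k=3))
#     # Restrict scoring to specific candidates via `targets`:
#     print(unmasker("The capital of France is <mask>.", targets=[" Paris"]))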
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : List[str] = ["""image_processor""", """tokenizer"""] UpperCAmelCase_ : int = """OwlViTImageProcessor""" UpperCAmelCase_ : Any = ("""CLIPTokenizer""", """CLIPTokenizerFast""") def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->Any: lowerCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __SCREAMING_SNAKE_CASE , ) lowerCAmelCase = kwargs.pop('''feature_extractor''' ) lowerCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="max_length" , __SCREAMING_SNAKE_CASE="np" , **__SCREAMING_SNAKE_CASE ) ->int: if text is None and query_images is None and images is None: raise ValueError( '''You have to specify at least one text or query image or image. All three cannot be none.''' ) if text is not None: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or (isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not isinstance(text[0] , __SCREAMING_SNAKE_CASE )): lowerCAmelCase = [self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )] elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(text[0] , __SCREAMING_SNAKE_CASE ): lowerCAmelCase = [] # Maximum number of queries across batch lowerCAmelCase = max([len(__SCREAMING_SNAKE_CASE ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__SCREAMING_SNAKE_CASE ) != max_num_queries: lowerCAmelCase = t + [''' '''] * (max_num_queries - len(__SCREAMING_SNAKE_CASE )) lowerCAmelCase = self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) encodings.append(__SCREAMING_SNAKE_CASE ) else: raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' ) if return_tensors == "np": lowerCAmelCase = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) lowerCAmelCase = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp lowerCAmelCase = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) lowerCAmelCase = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch lowerCAmelCase = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 ) lowerCAmelCase = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf 
lowerCAmelCase = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) lowerCAmelCase = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) else: raise ValueError('''Target return tensor type could not be returned''' ) lowerCAmelCase = BatchEncoding() lowerCAmelCase = input_ids lowerCAmelCase = attention_mask if query_images is not None: lowerCAmelCase = BatchEncoding() lowerCAmelCase = self.image_processor( __SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).pixel_values lowerCAmelCase = query_pixel_values if images is not None: lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if text is not None and images is not None: lowerCAmelCase = image_features.pixel_values return encoding elif query_images is not None and images is not None: lowerCAmelCase = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__SCREAMING_SNAKE_CASE ) , tensor_type=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Optional[int]: return self.image_processor.post_process(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Any: return self.image_processor.post_process_object_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Tuple: return self.image_processor.post_process_image_guided_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->str: return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->List[Any]: return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) @property def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __SCREAMING_SNAKE_CASE , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE_ ( self ) ->int: warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __SCREAMING_SNAKE_CASE , ) return self.image_processor
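# --- Illustrative usage sketch (added). Assuming the class above is
# `OwlViTProcessor`, it bundles text and image preparation for zero-shot
# object detection:
#
#     import requests
#     from PIL import Image
#     from transformers import OwlViTProcessor
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#     image = Image.open(requests.get(url, stream=True).raw)
#     inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                        images=image, return_tensors="pt")
#     print(inputs.keys())  # input_ids, attention_mask, pixel_values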
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}

if TYPE_CHECKING:
    from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import numpy as np
import skfuzzy as fuzz


if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title('''Young''')
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title('''Middle aged''')
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title('''union''')
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title('''intersection''')
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title('''complement_a''')
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title('''difference a/b''')
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title('''alg_sum''')
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title('''alg_product''')
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title('''bdd_sum''')
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title('''bdd_difference''')
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
LETTERS = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''


def main() -> None:
    message = input('''Enter message: ''')
    key = input('''Enter key [alphanumeric]: ''')
    mode = input('''Encrypt/Decrypt [e/d]: ''')

    if mode.lower().startswith('''e'''):
        mode = '''encrypt'''
        translated = encrypt_message(key, message)
    elif mode.lower().startswith('''d'''):
        mode = '''decrypt'''
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, '''encrypt''')


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, '''decrypt''')


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
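# --- Worked example (added; deterministic, no interactive input). With the
# classic key "LEMON":
#
#     >>> encrypt_message("LEMON", "Attack at dawn")
#     'Lxfopv ef rnhr'
#     >>> decrypt_message("LEMON", "Lxfopv ef rnhr")
#     'Attack at dawn'
#
# Non-letters pass through unchanged, and the key only advances on letters.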
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    '''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
    '''tokenization_biogpt''': ['''BioGptTokenizer'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_biogpt'''] = [
        '''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''BioGptForCausalLM''',
        '''BioGptForTokenClassification''',
        '''BioGptForSequenceClassification''',
        '''BioGptModel''',
        '''BioGptPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1000000, term_limit: int = 10) -> int:
    # count[t] = how many distinct square laminae use exactly t tiles
    count = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole width must share the parity of the outer width
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= term_limit)


if __name__ == "__main__":
    print(f'{solution() = }')
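# --- Optional sanity check (added): a brute-force cross-check for small limits.
# `_brute_force` enumerates every lamina directly instead of bounding the hole
# width, so it is only practical for tiny `t_limit` values; for such values it
# should agree with `solution`.
def _brute_force(t_limit: int = 100, term_limit: int = 10) -> int:
    from collections import Counter

    counts = Counter()
    for outer in range(3, t_limit):
        # largest hole first, so tile counts grow monotonically and we can break
        for hole in range(outer - 2, 0, -2):
            tiles = outer * outer - hole * hole
            if tiles > t_limit:
                break
            counts[tiles] += 1
    return sum(1 for n in counts.values() if 1 <= n <= term_limit)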
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf lowercase__ : Union[str, Any] = logging.get_logger(__name__) @dataclass class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = [ """no_inference""", """no_cuda""", """no_tpu""", """no_speed""", """no_memory""", """no_env_print""", """no_multi_process""", ] def __init__( self , **__SCREAMING_SNAKE_CASE ) ->Dict: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: lowerCAmelCase = deprecated_arg[3:] lowerCAmelCase = not kwargs.pop(__SCREAMING_SNAKE_CASE ) logger.warning( F"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or" F" {positive_arg}={kwargs[positive_arg]}" ) lowerCAmelCase = kwargs.pop('''tpu_name''' , self.tpu_name ) lowerCAmelCase = kwargs.pop('''device_idx''' , self.device_idx ) lowerCAmelCase = kwargs.pop('''eager_mode''' , self.eager_mode ) lowerCAmelCase = kwargs.pop('''use_xla''' , self.use_xla ) super().__init__(**__SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = field( default=UpperCamelCase_ , metadata={"""help""": """Name of TPU"""} , ) UpperCAmelCase_ : int = field( default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , ) UpperCAmelCase_ : bool = field(default=UpperCamelCase_ , metadata={"""help""": """Benchmark models in eager model."""} ) UpperCAmelCase_ : bool = field( default=UpperCamelCase_ , metadata={ """help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.""" } , ) @cached_property def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ['''tf'''] ) lowerCAmelCase = None if self.tpu: try: if self.tpu_name: lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: lowerCAmelCase = None return tpu @cached_property def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ['''tf'''] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) lowerCAmelCase = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' ) lowerCAmelCase = tf.distribute.OneDeviceStrategy(device=F"/gpu:{self.device_idx}" ) else: tf.config.set_visible_devices([] , '''GPU''' ) # disable GPU lowerCAmelCase = tf.distribute.OneDeviceStrategy(device=F"/cpu:{self.device_idx}" ) return strategy @property def SCREAMING_SNAKE_CASE_ ( self ) ->bool: requires_backends(self , ['''tf'''] ) return self._setup_tpu is not None @property def SCREAMING_SNAKE_CASE_ ( self ) ->"tf.distribute.Strategy": requires_backends(self , ['''tf'''] ) return self._setup_strategy @property def SCREAMING_SNAKE_CASE_ ( self ) ->str: requires_backends(self , ['''tf'''] ) return tf.config.list_physical_devices('''GPU''' ) @property def SCREAMING_SNAKE_CASE_ ( self ) ->int: requires_backends(self , ['''tf'''] ) if self.cuda: return len(self.gpu_list ) return 0 @property def SCREAMING_SNAKE_CASE_ ( self ) ->bool: return 
self.n_gpu > 0
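# --- Illustrative usage sketch (added). These arguments drive transformers'
# (now deprecated) TensorFlow benchmarking utility:
#
#     from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
#
#     args = TensorFlowBenchmarkArguments(
#         models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[32]
#     )
#     results = TensorFlowBenchmark(args).run()  # measures speed and memory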
import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> Union[str, Any]: assert isinstance(snake_case__ , snake_case__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Union[str, Any]: lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''text''': '''string'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_text_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize( '''features''' , [ None, {'''text''': '''string'''}, {'''text''': '''int32'''}, {'''text''': '''float32'''}, ] , ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]: lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''text''': '''string'''} lowerCAmelCase = features.copy() if features else default_expected_features lowerCAmelCase = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase = TextDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read() _check_text_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> List[str]: lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''text''': '''string'''} lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read() _check_text_dataset(snake_case__ , snake_case__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]: if issubclass(snake_case__ , snake_case__ ): lowerCAmelCase = text_path elif issubclass(snake_case__ , snake_case__ ): lowerCAmelCase = [text_path] lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''text''': '''string'''} lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_text_dataset(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__=("train",) ) -> Optional[Any]: assert isinstance(snake_case__ , snake_case__ ) for split in splits: lowerCAmelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]: lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''text''': '''string'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase = TextDatasetReader({'''train''': 
text_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_text_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize( '''features''' , [ None, {'''text''': '''string'''}, {'''text''': '''int32'''}, {'''text''': '''float32'''}, ] , ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> List[Any]: lowerCAmelCase = tmp_path / '''cache''' # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" lowerCAmelCase = {'''text''': '''string'''} lowerCAmelCase = features.copy() if features else default_expected_features lowerCAmelCase = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase = TextDatasetReader({'''train''': text_path} , features=snake_case__ , cache_dir=snake_case__ ).read() _check_text_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Any: if split: lowerCAmelCase = {split: text_path} else: lowerCAmelCase = '''train''' lowerCAmelCase = {'''train''': text_path, '''test''': text_path} lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''text''': '''string'''} lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_text_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
import unittest

from parameterized import parameterized

from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel


class lowercase_ :
    """simple docstring"""

    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.0_2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , ) ->Tuple:
        lowerCAmelCase = parent
        lowerCAmelCase = batch_size
        lowerCAmelCase = seq_length
        lowerCAmelCase = is_training
        lowerCAmelCase = use_input_mask
        lowerCAmelCase = use_token_type_ids
        lowerCAmelCase = use_labels
        lowerCAmelCase = vocab_size
        lowerCAmelCase = hidden_size
        lowerCAmelCase = num_hidden_layers
        lowerCAmelCase = num_attention_heads
        lowerCAmelCase = intermediate_size
        lowerCAmelCase = hidden_act
        lowerCAmelCase = hidden_dropout_prob
        lowerCAmelCase = attention_probs_dropout_prob
        lowerCAmelCase = max_position_embeddings
        lowerCAmelCase = type_vocab_size
        lowerCAmelCase = type_sequence_label_size
        lowerCAmelCase = initializer_range
        lowerCAmelCase = num_labels
        lowerCAmelCase = num_choices
        lowerCAmelCase = scope

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
        lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        lowerCAmelCase = None
        if self.use_input_mask:
            lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )

        lowerCAmelCase = None
        if self.use_token_type_ids:
            lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        lowerCAmelCase = None
        lowerCAmelCase = None
        lowerCAmelCase = None
        if self.use_labels:
            lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )

        lowerCAmelCase = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
        return OpenLlamaConfig(
            vocab_size=self.vocab_size ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            type_vocab_size=self.type_vocab_size ,
            is_decoder=__SCREAMING_SNAKE_CASE ,
            initializer_range=self.initializer_range ,
            use_stable_embedding=__SCREAMING_SNAKE_CASE ,
        )

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->List[Any]:
        lowerCAmelCase = OpenLlamaModel(config=__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) ->List[Any]:
        lowerCAmelCase = True
        lowerCAmelCase = OpenLlamaModel(__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCAmelCase = model(
            __SCREAMING_SNAKE_CASE ,
            attention_mask=__SCREAMING_SNAKE_CASE ,
            encoder_hidden_states=__SCREAMING_SNAKE_CASE ,
            encoder_attention_mask=__SCREAMING_SNAKE_CASE ,
        )
        lowerCAmelCase = model(
            __SCREAMING_SNAKE_CASE ,
            attention_mask=__SCREAMING_SNAKE_CASE ,
            encoder_hidden_states=__SCREAMING_SNAKE_CASE ,
        )
        lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) ->Union[str, Any]:
        lowerCAmelCase = OpenLlamaForCausalLM(config=__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) ->Optional[int]:
        lowerCAmelCase = True
        lowerCAmelCase = True
        lowerCAmelCase = OpenLlamaForCausalLM(config=__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()

        # first forward pass
        lowerCAmelCase = model(
            __SCREAMING_SNAKE_CASE ,
            attention_mask=__SCREAMING_SNAKE_CASE ,
            encoder_hidden_states=__SCREAMING_SNAKE_CASE ,
            encoder_attention_mask=__SCREAMING_SNAKE_CASE ,
            use_cache=__SCREAMING_SNAKE_CASE ,
        )
        lowerCAmelCase = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )

        # append to next input_ids and
        lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
        lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )

        lowerCAmelCase = model(
            __SCREAMING_SNAKE_CASE ,
            attention_mask=__SCREAMING_SNAKE_CASE ,
            encoder_hidden_states=__SCREAMING_SNAKE_CASE ,
            encoder_attention_mask=__SCREAMING_SNAKE_CASE ,
            output_hidden_states=__SCREAMING_SNAKE_CASE ,
        )['''hidden_states'''][0]
        lowerCAmelCase = model(
            __SCREAMING_SNAKE_CASE ,
            attention_mask=__SCREAMING_SNAKE_CASE ,
            encoder_hidden_states=__SCREAMING_SNAKE_CASE ,
            encoder_attention_mask=__SCREAMING_SNAKE_CASE ,
            past_key_values=__SCREAMING_SNAKE_CASE ,
            output_hidden_states=__SCREAMING_SNAKE_CASE ,
        )['''hidden_states'''][0]

        # select random slice
        lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
        lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        lowerCAmelCase = self.prepare_config_and_inputs()
        (
            (lowerCAmelCase) ,
            (lowerCAmelCase) ,
            (lowerCAmelCase) ,
            (lowerCAmelCase) ,
            (lowerCAmelCase) ,
            (lowerCAmelCase) ,
            (lowerCAmelCase) ,
        ) = config_and_inputs
        lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


@require_torch
class lowercase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    """simple docstring"""

    UpperCAmelCase_ : Union[str, Any] = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    UpperCAmelCase_ : Union[str, Any] = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    UpperCAmelCase_ : Any = (
        {
            '''feature-extraction''': OpenLlamaModel,
            '''text-classification''': OpenLlamaForSequenceClassification,
            '''text-generation''': OpenLlamaForCausalLM,
            '''zero-shot''': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    UpperCAmelCase_ : Tuple = False
    UpperCAmelCase_ : List[str] = False

    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        lowerCAmelCase = OpenLlamaModelTester(self )
        lowerCAmelCase = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )

    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowerCAmelCase = type
            self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase = 3
        lowerCAmelCase = input_dict['''input_ids''']
        lowerCAmelCase = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        lowerCAmelCase = OpenLlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase = 3
        lowerCAmelCase = '''single_label_classification'''
        lowerCAmelCase = input_dict['''input_ids''']
        lowerCAmelCase = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        lowerCAmelCase = OpenLlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase = 3
        lowerCAmelCase = '''multi_label_classification'''
        lowerCAmelCase = input_dict['''input_ids''']
        lowerCAmelCase = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size
        ).to(torch.float )
        lowerCAmelCase = OpenLlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    @unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        pass

    @parameterized.expand([('''linear''',), ('''dynamic''',)] )
    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Dict:
        lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase = ids_tensor([1, 10] , config.vocab_size )
        lowerCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )

        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        lowerCAmelCase = OpenLlamaModel(__SCREAMING_SNAKE_CASE )
        original_model.to(__SCREAMING_SNAKE_CASE )
        original_model.eval()
        lowerCAmelCase = original_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
        lowerCAmelCase = original_model(__SCREAMING_SNAKE_CASE ).last_hidden_state

        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        lowerCAmelCase = {'''type''': scaling_type, '''factor''': 1_0.0}
        lowerCAmelCase = OpenLlamaModel(__SCREAMING_SNAKE_CASE )
        scaled_model.to(__SCREAMING_SNAKE_CASE )
        scaled_model.eval()
        lowerCAmelCase = scaled_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
        lowerCAmelCase = scaled_model(__SCREAMING_SNAKE_CASE ).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-5 ) )

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-5 ) )
def SCREAMING_SNAKE_CASE_ ( num ) -> str:
    if isinstance(num , float ):
        raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
    if isinstance(num , str ):
        raise TypeError('''\'str\' object cannot be interpreted as an integer''' )

    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
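# A few sanity checks for the converter above (added for illustration; they call the
# function under the anonymized name it carries in this file).
assert SCREAMING_SNAKE_CASE_(0 ) == "0b0"
assert SCREAMING_SNAKE_CASE_(5 ) == "0b101"
assert SCREAMING_SNAKE_CASE_(-5 ) == "-0b101"
assert SCREAMING_SNAKE_CASE_(8 ) == "0b1000"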
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class lowercase_ ( UpperCamelCase_ ):
    """simple docstring"""

    UpperCAmelCase_ : Tuple = DistilBertTokenizer
    UpperCAmelCase_ : Dict = DistilBertTokenizerFast
    UpperCAmelCase_ : Optional[Any] = True

    @slow
    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
        lowerCAmelCase = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )

        lowerCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
class lowercase_ :
    """simple docstring"""

    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Any:
        lowerCAmelCase = name
        lowerCAmelCase = value
        lowerCAmelCase = weight

    def __repr__( self ) ->str:
        return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        return self.value

    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        return self.name

    def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
        return self.weight

    def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
        return self.value / self.weight


def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> int:
    lowerCAmelCase = []
    for i in range(len(snake_case__ ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu


def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
    lowerCAmelCase = sorted(snake_case__ , key=snake_case__ , reverse=snake_case__ )
    lowerCAmelCase = []
    lowerCAmelCase , lowerCAmelCase = 0.0, 0.0
    for i in range(len(snake_case__ ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
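# Usage sketch (an assumption: with the anonymized helpers read back as `build_menu` and
# `greedy`, and the accessors as `get_value` / `get_weight`, the greedy knapsack above
# would be driven like this):
#
#     menu = build_menu(["burger", "pizza", "coke"], [80, 100, 30], [40, 60, 10])
#     chosen, total_value = greedy(menu, 70.0, Things.get_value)
#     # items are taken in descending key order until the 70-unit weight budget is exhausted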
import argparse
import collections

import torch
from flax import traverse_util
from tax import checkpoints

from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__="attention" ) -> str:
    lowerCAmelCase = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    lowerCAmelCase = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    lowerCAmelCase = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    lowerCAmelCase = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v


def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ) -> Optional[Any]:
    if split_mlp_wi:
        lowerCAmelCase = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        lowerCAmelCase = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        lowerCAmelCase = (wi_a, wi_a)
    else:
        lowerCAmelCase = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    lowerCAmelCase = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Tuple:
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]


def SCREAMING_SNAKE_CASE_ ( snake_case__ , *, snake_case__ , snake_case__ ) -> Optional[Any]:
    lowerCAmelCase = traverse_util.flatten_dict(variables['''target'''] )
    lowerCAmelCase = {'''/'''.join(snake_case__ ): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    lowerCAmelCase = '''encoder/layers_0/mlp/wi_0/kernel''' in old
    print('''Split MLP:''' , snake_case__ )

    lowerCAmelCase = collections.OrderedDict()

    # Shared embeddings.
    lowerCAmelCase = old['''token_embedder/embedding''']

    # Encoder.
    for i in range(snake_case__ ):
        # Block i, layer 0 (Self Attention).
        lowerCAmelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , '''encoder''' , '''pre_attention_layer_norm''' )
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = tax_attention_lookup(snake_case__ , snake_case__ , '''encoder''' , '''attention''' )
        lowerCAmelCase = layer_norm
        lowerCAmelCase = k.T
        lowerCAmelCase = o.T
        lowerCAmelCase = q.T
        lowerCAmelCase = v.T

        # Block i, layer 1 (MLP).
        lowerCAmelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , '''encoder''' , '''pre_mlp_layer_norm''' )
        lowerCAmelCase , lowerCAmelCase = tax_mlp_lookup(snake_case__ , snake_case__ , '''encoder''' , snake_case__ )
        lowerCAmelCase = layer_norm
        if split_mlp_wi:
            lowerCAmelCase = wi[0].T
            lowerCAmelCase = wi[1].T
        else:
            lowerCAmelCase = wi.T
        lowerCAmelCase = wo.T

    lowerCAmelCase = old['''encoder/relpos_bias/rel_embedding'''].T
    lowerCAmelCase = old['''encoder/encoder_norm/scale''']

    if not is_encoder_only:
        # Decoder.
        for i in range(snake_case__ ):
            # Block i, layer 0 (Self Attention).
            lowerCAmelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , '''decoder''' , '''pre_self_attention_layer_norm''' )
            lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = tax_attention_lookup(snake_case__ , snake_case__ , '''decoder''' , '''self_attention''' )
            lowerCAmelCase = layer_norm
            lowerCAmelCase = k.T
            lowerCAmelCase = o.T
            lowerCAmelCase = q.T
            lowerCAmelCase = v.T

            # Block i, layer 1 (Cross Attention).
            lowerCAmelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , '''decoder''' , '''pre_cross_attention_layer_norm''' )
            lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = tax_attention_lookup(snake_case__ , snake_case__ , '''decoder''' , '''encoder_decoder_attention''' )
            lowerCAmelCase = layer_norm
            lowerCAmelCase = k.T
            lowerCAmelCase = o.T
            lowerCAmelCase = q.T
            lowerCAmelCase = v.T

            # Block i, layer 2 (MLP).
            lowerCAmelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , '''decoder''' , '''pre_mlp_layer_norm''' )
            lowerCAmelCase , lowerCAmelCase = tax_mlp_lookup(snake_case__ , snake_case__ , '''decoder''' , snake_case__ )
            lowerCAmelCase = layer_norm
            if split_mlp_wi:
                lowerCAmelCase = wi[0].T
                lowerCAmelCase = wi[1].T
            else:
                lowerCAmelCase = wi.T
            lowerCAmelCase = wo.T

        lowerCAmelCase = old['''decoder/decoder_norm/scale''']
        lowerCAmelCase = old['''decoder/relpos_bias/rel_embedding'''].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            lowerCAmelCase = old['''decoder/logits_dense/kernel'''].T

    return new


def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> Union[str, Any]:
    lowerCAmelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        lowerCAmelCase = state_dict['''shared.weight''']

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            lowerCAmelCase = state_dict['''shared.weight''']

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('''Using shared word embeddings as lm_head.''' )
            lowerCAmelCase = state_dict['''shared.weight''']

    return state_dict


def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Union[str, Any]:
    lowerCAmelCase = checkpoints.load_tax_checkpoint(snake_case__ )
    lowerCAmelCase = convert_tax_to_pytorch(snake_case__ , num_layers=config.num_layers , is_encoder_only=snake_case__ )
    lowerCAmelCase = make_state_dict(snake_case__ , snake_case__ )
    model.load_state_dict(snake_case__ , strict=snake_case__ )


def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False ) -> int:
    lowerCAmelCase = TaConfig.from_json_file(snake_case__ )
    print(f"Building PyTorch model from configuration: {config}" )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        lowerCAmelCase = TaEncoderModel(snake_case__ )
    else:
        lowerCAmelCase = TaForConditionalGeneration(snake_case__ )

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(snake_case__ , snake_case__ , snake_case__ , snake_case__ )

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(snake_case__ )

    # Verify that we can load the checkpoint.
    model.from_pretrained(snake_case__ )
    print('''Done''' )


if __name__ == "__main__":
    lowercase__ : Dict = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
    # Required parameters
    parser.add_argument(
        '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
    )
    lowercase__ : Optional[Any] = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
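# Example invocation (illustrative; the script name and all paths are placeholders, while
# the flag names come from the argparse definitions above):
#
#     python convert_t5x_checkpoint_to_pytorch.py \
#         --t5x_checkpoint_path /path/to/t5x/checkpoint \
#         --config_file /path/to/t5_config.json \
#         --pytorch_dump_path /path/to/pytorch_model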
import numpy as np
import skfuzzy as fuzz


if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    lowercase__ : Dict = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    lowercase__ : Optional[int] = [0, 2_5, 5_0]
    lowercase__ : Union[str, Any] = [2_5, 5_0, 7_5]
    lowercase__ : int = fuzz.membership.trimf(X, abca)
    lowercase__ : Tuple = fuzz.membership.trimf(X, abca)

    # Compute the different operations using inbuilt functions.
    lowercase__ : List[str] = np.ones(7_5)
    lowercase__ : Any = np.zeros((7_5,))
    # 1. Union = max(µA(x), µB(x))
    lowercase__ : Union[str, Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    lowercase__ : int = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    lowercase__ : Union[str, Any] = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    lowercase__ : Optional[int] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    lowercase__ : Any = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    lowercase__ : str = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    lowercase__ : Tuple = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    lowercase__ : Tuple = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title('''Young''')
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title('''Middle aged''')
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title('''union''')
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title('''intersection''')
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title('''complement_a''')
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title('''difference a/b''')
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title('''alg_sum''')
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title('''alg_product''')
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title('''bdd_sum''')
    plt.grid(True)

    plt.subplot(4, 3, 1_0)
    plt.plot(X, bdd_difference)
    plt.title('''bdd_difference''')
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
# Function to print upper half of diamond (pyramid)
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Dict:
    for i in range(0 , snake_case__ ):
        for _ in range(0 , n - i - 1 ):  # printing spaces
            print(''' ''' , end='''''' )
        for _ in range(0 , i + 1 ):  # printing stars
            print('''* ''' , end='''''' )
        print()


def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Tuple:
    for i in range(snake_case__ , 0 , -1 ):
        for _ in range(snake_case__ , 0 , -1 ):  # printing stars
            print('''* ''' , end='''''' )
        print()
        for _ in range(n - i + 1 , 0 , -1 ):  # printing spaces
            print(''' ''' , end='''''' )


def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str:
    if n <= 0:
        print(''' ... .... nothing printing :(''' )
        return
    floyd(snake_case__ )  # upper half
    reverse_floyd(snake_case__ )  # lower half


if __name__ == "__main__":
    print(R'''| /\ | |- | |- |--| |\ /| |-''')
    print(R'''|/ \| |- |_ |_ |__| | \/ | |_''')
    lowercase__ : List[Any] = 1
    while K:
        lowercase__ : Optional[int] = int(input('''enter the number and , and see the magic : '''))
        print()
        pretty_print(user_number)
        lowercase__ : Dict = int(input('''press 0 to exit... and 1 to continue...'''))

    print('''Good Bye...''')
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class lowercase_ ( UpperCamelCase_ ):
    """simple docstring"""

    UpperCAmelCase_ : str = (DDPMScheduler,)

    def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->Optional[Any]:
        lowerCAmelCase = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0_0_0_1,
            '''beta_end''': 0.0_2,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }
        config.update(**__SCREAMING_SNAKE_CASE )
        return config

    def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
            self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
        self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=__SCREAMING_SNAKE_CASE ,
                    prediction_type=__SCREAMING_SNAKE_CASE ,
                    sample_max_value=__SCREAMING_SNAKE_CASE ,
                )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )

        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5

    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = self.dummy_model()
        lowerCAmelCase = self.dummy_sample_deter
        lowerCAmelCase = torch.manual_seed(0 )

        for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
            # 1. predict noise residual
            lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

            # 2. predict previous mean of sample x_t-1
            lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            lowerCAmelCase = pred_prev_sample

        lowerCAmelCase = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
        lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )

        assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
        assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3

    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = self.dummy_model()
        lowerCAmelCase = self.dummy_sample_deter
        lowerCAmelCase = torch.manual_seed(0 )

        for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
            # 1. predict noise residual
            lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

            # 2. predict previous mean of sample x_t-1
            lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            lowerCAmelCase = pred_prev_sample

        lowerCAmelCase = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
        lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )

        assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
        assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3

    def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = scheduler.timesteps

        for i, timestep in enumerate(__SCREAMING_SNAKE_CASE ):
            if i == len(__SCREAMING_SNAKE_CASE ) - 1:
                lowerCAmelCase = -1
            else:
                lowerCAmelCase = timesteps[i + 1]

            lowerCAmelCase = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE )
            lowerCAmelCase = prev_t.item()

            self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = [100, 87, 50, 51, 0]

        with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''`custom_timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = [100, 87, 50, 1, 0]
        lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )

        with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            __SCREAMING_SNAKE_CASE ,
            msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' ,
        ):
            scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
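# Minimal standalone sketch of the scheduler API exercised above (illustrative; it uses
# only calls that appear in these tests):
#
#     from diffusers import DDPMScheduler
#     scheduler = DDPMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(num_inference_steps=50)   # or set_timesteps(timesteps=[100, 87, 50, 1, 0])
#     print(scheduler.timesteps)                        # descending tensor of inference timesteps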
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    '''split_dict''' ,
    [
        SplitDict(),
        SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name='''my_dataset''' )} ),
        SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 )} ),
        SplitDict({'''train''': SplitInfo()} ),
    ] ,
)
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]:
    lowerCAmelCase = split_dict._to_yaml_list()
    assert len(snake_case__ ) == len(snake_case__ )
    lowerCAmelCase = SplitDict._from_yaml_list(snake_case__ )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        lowerCAmelCase = None
        # the split name of split_dict takes over the name of the split info object
        lowerCAmelCase = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    '''split_info''' , [SplitInfo(), SplitInfo(dataset_name=snake_case__ ), SplitInfo(dataset_name='''my_dataset''' )]
)
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[int]:
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files.
    lowerCAmelCase = asdict(SplitDict({'''train''': split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
import json
import os
from typing import Optional

import numpy as np

from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer


lowercase__ : str = logging.get_logger(__name__)


class lowercase_ ( UpperCamelCase_ ):
    """simple docstring"""

    UpperCAmelCase_ : Any = """AutoTokenizer"""
    UpperCAmelCase_ : Optional[int] = ["""tokenizer"""]
    UpperCAmelCase_ : str = {
        """semantic_prompt""": 1,
        """coarse_prompt""": 2,
        """fine_prompt""": 2,
    }

    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]:
        super().__init__(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = speaker_embeddings

    @classmethod
    def SCREAMING_SNAKE_CASE_ ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , **__SCREAMING_SNAKE_CASE ) ->Tuple:
        if speaker_embeddings_dict_path is not None:
            lowerCAmelCase = get_file_from_repo(
                __SCREAMING_SNAKE_CASE ,
                __SCREAMING_SNAKE_CASE ,
                subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ) ,
                cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ) ,
                force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ) ,
                proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ) ,
                resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ) ,
                local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ) ,
                use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ) ,
                revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ) ,
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    F"`{os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                lowerCAmelCase = None
            else:
                with open(__SCREAMING_SNAKE_CASE ) as speaker_embeddings_json:
                    lowerCAmelCase = json.load(__SCREAMING_SNAKE_CASE )
        else:
            lowerCAmelCase = None

        lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )

        return cls(tokenizer=__SCREAMING_SNAKE_CASE , speaker_embeddings=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , __SCREAMING_SNAKE_CASE="speaker_embeddings" , __SCREAMING_SNAKE_CASE = False , **__SCREAMING_SNAKE_CASE , ) ->int:
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''v2''' ) , exist_ok=__SCREAMING_SNAKE_CASE )

            lowerCAmelCase = {}
            lowerCAmelCase = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE )

                    lowerCAmelCase = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict['''repo_or_path'''] , __SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}"
                            ) ,
                            voice_preset[key] ,
                            allow_pickle=__SCREAMING_SNAKE_CASE ,
                        )
                        lowerCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}.npy" )

                    lowerCAmelCase = tmp_dict

            with open(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , '''w''' ) as fp:
                json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

        super().save_pretrained(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE ) ->List[str]:
        lowerCAmelCase = self.speaker_embeddings[voice_preset]

        lowerCAmelCase = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    F"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            lowerCAmelCase = get_file_from_repo(
                self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) ,
                voice_preset_paths[key] ,
                subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ) ,
                cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ) ,
                force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ) ,
                proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ) ,
                resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ) ,
                local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ) ,
                use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ) ,
                revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ) ,
            )
            if path is None:
                raise ValueError(
                    F"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings."
                )

            lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE )

        return voice_preset_dict

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None ) ->Tuple:
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(F"Voice preset unrecognized, missing {key} as a key." )

            if not isinstance(voice_preset[key] , np.ndarray ):
                raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )

            if len(voice_preset[key].shape ) != self.preset_shape[key]:
                raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )

    def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="pt" , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ) ->int:
        if voice_preset is not None and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
            if (
                isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE )

            else:
                if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not voice_preset.endswith('''.npz''' ):
                    lowerCAmelCase = voice_preset + '''.npz'''

                lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE )

        if voice_preset is not None:
            self._validate_voice_preset_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
            lowerCAmelCase = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = self.tokenizer(
            __SCREAMING_SNAKE_CASE ,
            return_tensors=__SCREAMING_SNAKE_CASE ,
            padding='''max_length''' ,
            max_length=__SCREAMING_SNAKE_CASE ,
            return_attention_mask=__SCREAMING_SNAKE_CASE ,
            return_token_type_ids=__SCREAMING_SNAKE_CASE ,
            add_special_tokens=__SCREAMING_SNAKE_CASE ,
            **__SCREAMING_SNAKE_CASE ,
        )

        if voice_preset is not None:
            lowerCAmelCase = voice_preset

        return encoded_text
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

lowercase__ : int = TypeVar('''T''')
lowercase__ : Optional[int] = TypeVar('''U''')


class lowercase_ ( Generic[T, U] ):
    """simple docstring"""

    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Tuple:
        lowerCAmelCase = key
        lowerCAmelCase = val
        lowerCAmelCase = None
        lowerCAmelCase = None

    def __repr__( self ) ->str:
        return (
            F"Node: key: {self.key}, val: {self.val}, "
            F"has next: {bool(self.next )}, has prev: {bool(self.prev )}"
        )


class lowercase_ ( Generic[T, U] ):
    """simple docstring"""

    def __init__( self ) ->None:
        lowerCAmelCase = DoubleLinkedListNode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        lowerCAmelCase = DoubleLinkedListNode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        lowerCAmelCase , lowerCAmelCase = self.rear, self.head

    def __repr__( self ) ->str:
        lowerCAmelCase = ['''DoubleLinkedList''']
        lowerCAmelCase = self.head
        while node.next is not None:
            rep.append(str(__SCREAMING_SNAKE_CASE ) )
            lowerCAmelCase = node.next
        rep.append(str(self.rear ) )
        return ",\n ".join(__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->None:
        lowerCAmelCase = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        lowerCAmelCase = node
        lowerCAmelCase = previous
        lowerCAmelCase = node
        lowerCAmelCase = self.rear

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None

        lowerCAmelCase = node.next
        lowerCAmelCase = node.prev
        lowerCAmelCase = None
        lowerCAmelCase = None
        return node


class lowercase_ ( Generic[T, U] ):
    """simple docstring"""

    UpperCAmelCase_ : dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__( self , __SCREAMING_SNAKE_CASE ) ->List[str]:
        lowerCAmelCase = DoubleLinkedList()
        lowerCAmelCase = capacity
        lowerCAmelCase = 0
        lowerCAmelCase = 0
        lowerCAmelCase = 0
        lowerCAmelCase = {}

    def __repr__( self ) ->str:
        return (
            F"CacheInfo(hits={self.hits}, misses={self.miss}, "
            F"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__( self , __SCREAMING_SNAKE_CASE ) ->bool:
        return key in self.cache

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->U | None:
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            lowerCAmelCase = self.cache[key]
            lowerCAmelCase = self.list.remove(self.cache[key] )
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(__SCREAMING_SNAKE_CASE )
            return node.val
        self.miss += 1
        return None

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                lowerCAmelCase = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(__SCREAMING_SNAKE_CASE ) is not None
                )  # node guaranteed to be in list
                assert node.key is not None

                del self.cache[first_node.key]
                self.num_keys -= 1
            lowerCAmelCase = DoubleLinkedListNode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            self.list.add(self.cache[key] )
            self.num_keys += 1

        else:
            # bump node to the end of the list, update value
            lowerCAmelCase = self.list.remove(self.cache[key] )
            assert node is not None  # node guaranteed to be in list
            lowerCAmelCase = value
            self.list.add(__SCREAMING_SNAKE_CASE )

    @classmethod
    def SCREAMING_SNAKE_CASE_ ( cls , __SCREAMING_SNAKE_CASE = 128 ) ->Callable[[Callable[[T], U]], Callable[..., U]]:
        def cache_decorator_inner(__SCREAMING_SNAKE_CASE ) -> Callable[..., U]:
            def cache_decorator_wrapper(*__SCREAMING_SNAKE_CASE ) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    lowerCAmelCase = LRUCache(__SCREAMING_SNAKE_CASE )

                lowerCAmelCase = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    lowerCAmelCase = func(*__SCREAMING_SNAKE_CASE )
                    cls.decorator_function_to_instance_map[func].put(args[0] , __SCREAMING_SNAKE_CASE )
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(__SCREAMING_SNAKE_CASE , '''cache_info''' , __SCREAMING_SNAKE_CASE )  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
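# Usage sketch for the decorator classmethod above (an assumption: reading the anonymized
# names back as `LRUCache.decorator`):
#
#     @LRUCache.decorator(100)
#     def fib(num):
#         if num in (1, 2):
#             return 1
#         return fib(num - 1) + fib(num - 2)
#
#     print(fib(100))          # memoized through the wrapper, so this returns instantly
#     print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)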
import warnings

from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline  # noqa F401


warnings.warn(
    '''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
    ''' StableDiffusionInpaintPipeline` instead.'''
)
import unittest

from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


lowercase__ : List[str] = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')


@require_sentencepiece
@require_tokenizers
class lowercase_ ( UpperCamelCase_ , unittest.TestCase ):
    """simple docstring"""

    UpperCAmelCase_ : Optional[Any] = GPTSwaTokenizer
    UpperCAmelCase_ : Optional[int] = False
    UpperCAmelCase_ : int = True
    UpperCAmelCase_ : Optional[Any] = False

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
        super().setUp()

        # We have a SentencePiece fixture for testing
        lowerCAmelCase = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''' )

        tokenizer.save_pretrained(self.tmpdirname )

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->int:
        lowerCAmelCase = '''This is a test'''
        lowerCAmelCase = '''This is a test'''
        return input_text, output_text

    def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        lowerCAmelCase = '''<s>'''
        lowerCAmelCase = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , '''<unk>''' )
        self.assertEqual(vocab_keys[1] , '''<s>''' )
        self.assertEqual(vocab_keys[-1] , '''j''' )
        self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2000 )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
        self.assertEqual(self.get_tokenizer().vocab_size , 2000 )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
        lowerCAmelCase = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )

        self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [465, 287, 265, 631, 842] )

        lowerCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        # fmt: off
        self.assertListEqual(
            __SCREAMING_SNAKE_CASE ,
            ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] ,
        )
        # fmt: on

        lowerCAmelCase = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
        self.assertListEqual(
            __SCREAMING_SNAKE_CASE ,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] ,
        )

        lowerCAmelCase = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
        # fmt: off
        self.assertListEqual(
            __SCREAMING_SNAKE_CASE ,
            ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.''']
        )
        # fmt: on

    def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        lowerCAmelCase = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
        lowerCAmelCase = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
            self.assertListEqual(tokenizer.encode_fast(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )

        # Test that decode_fast returns the input text
        for text, token_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
            self.assertEqual(tokenizer.decode_fast(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )

    @slow
    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
        lowerCAmelCase = [
            '''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
            '''Hey there, how are you doing this fine day?''',
            '''This is a text with a trailing spaces followed by a dot .''',
            '''Häj sväjs lillebrör! =)''',
            '''Det är inget fel på Mr. Cool''',
        ]

        # fmt: off
        lowerCAmelCase = {'''input_ids''': [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=__SCREAMING_SNAKE_CASE ,
            model_name='''AI-Sweden/gpt-sw3-126m''' ,
            sequences=__SCREAMING_SNAKE_CASE ,
        )
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


lowercase__ : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
lowercase__ : Dict = '''
    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states
'''


class lowercase_ ( unittest.TestCase ):
    """simple docstring"""

    def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        lowerCAmelCase = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) )
        lowerCAmelCase = self.transformer_dir
        shutil.copy(
            os.path.join(__SCREAMING_SNAKE_CASE , '''src/transformers/models/bert/modeling_bert.py''' ) ,
            os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) ,
        )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
        lowerCAmelCase = '''src/transformers'''
        shutil.rmtree(self.transformer_dir )

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Union[str, Any]:
        lowerCAmelCase = comment + F"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            lowerCAmelCase = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result
        lowerCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
        lowerCAmelCase = black.format_str(__SCREAMING_SNAKE_CASE , mode=__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = os.path.join(self.transformer_dir , '''new_code.py''' )
        with open(__SCREAMING_SNAKE_CASE , '''w''' , newline='''\n''' ) as f:
            f.write(__SCREAMING_SNAKE_CASE )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(__SCREAMING_SNAKE_CASE ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=__SCREAMING_SNAKE_CASE )
            with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f:
                self.assertTrue(f.read() , __SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        lowerCAmelCase = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
        self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
        # Base copy consistency
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' ,
            '''BertLMPredictionHead''' ,
            REFERENCE_CODE + '''\n''' ,
        )

        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' ,
            '''BertLMPredictionHead''' ,
            __SCREAMING_SNAKE_CASE ,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' ,
            '''TestModelLMPredictionHead''' ,
            re.sub('''Bert''' , '''TestModel''' , __SCREAMING_SNAKE_CASE ) ,
        )

        # Copy consistency with a really long name
        lowerCAmelCase = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            F"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" ,
            F"{long_class_name}LMPredictionHead" ,
            re.sub('''Bert''' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ,
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' ,
            '''TestModelLMPredictionHead''' ,
            __SCREAMING_SNAKE_CASE ,
            overwrite_result=re.sub('''Bert''' , '''TestModel''' , __SCREAMING_SNAKE_CASE ) ,
        )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
        lowerCAmelCase = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
        lowerCAmelCase = (
            '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
            ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
            ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
            ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
            ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
            ''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
            ''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
            ''' method has been applied to compress GPT2 into'''
            ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
            ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
            ''' Multilingual BERT into'''
            ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
            ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
            ''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
            ''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
            ''' Luong, Quoc V. Le, Christopher D. Manning.'''
        )
        lowerCAmelCase = (
            '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
            ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
            ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
            ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
        )
        lowerCAmelCase = (
            '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
            ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
            ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
            ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
            ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
            ''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
            ''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
            ''' method has been applied to compress GPT2 into'''
            ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
            ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
            ''' Multilingual BERT into'''
            ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
            ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
            ''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
            ''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
            ''' Christopher D. Manning 发布。\n'''
        )
        lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md(
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list''']
        )

        self.assertFalse(__SCREAMING_SNAKE_CASE )
        self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

        lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md(
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list''']
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = (
            '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
            ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
            ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
            ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
        )
        lowerCAmelCase = (
            '''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
            ''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
            ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
            ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
        )
        lowerCAmelCase = (
            '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
            ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
            ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
            ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
        )
        lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md(
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list''']
        )

        # Check if the model link is synchronized.
        self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> Optional[Any]: lowerCAmelCase = [] for part_id in partition_order: lowerCAmelCase = df.where(f"SPARK_PARTITION_ID() = {part_id}" ).collect() for row_idx, row in enumerate(snake_case__ ): expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE_ ( ) -> str: lowerCAmelCase = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() lowerCAmelCase = spark.range(1_0_0 ).repartition(1 ) lowerCAmelCase = Spark(snake_case__ ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=1_6 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 5_0 @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE_ ( ) -> Tuple: lowerCAmelCase = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() lowerCAmelCase = spark.range(1_0 ).repartition(2 ) lowerCAmelCase = [1, 0] lowerCAmelCase = _generate_iterable_examples(snake_case__ , snake_case__ ) # Reverse the partitions. lowerCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , snake_case__ ) for i, (row_id, row_dict) in enumerate(generate_fn() ): lowerCAmelCase , lowerCAmelCase = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE_ ( ) -> Tuple: lowerCAmelCase = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() lowerCAmelCase = spark.range(1_0 ).repartition(1 ) lowerCAmelCase = SparkExamplesIterable(snake_case__ ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(snake_case__ ): assert row_id == f"0_{i}" assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]: lowerCAmelCase = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() lowerCAmelCase = spark.range(3_0 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch('''numpy.random.Generator''' ) as generator_mock: lowerCAmelCase = lambda snake_case__ : snake_case__.reverse() lowerCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , [2, 1, 0] ) lowerCAmelCase = SparkExamplesIterable(snake_case__ ).shuffle_data_sources(snake_case__ ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(snake_case__ ): lowerCAmelCase , lowerCAmelCase = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE_ ( ) -> List[str]: lowerCAmelCase = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() lowerCAmelCase = spark.range(2_0 ).repartition(4 ) # Partitions 0 and 2 lowerCAmelCase = SparkExamplesIterable(snake_case__ ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowerCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , [0, 2] ) for i, (row_id, row_dict) in enumerate(snake_case__ ): lowerCAmelCase , lowerCAmelCase = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 lowerCAmelCase = SparkExamplesIterable(snake_case__ ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowerCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , [1, 3] ) for i, (row_id, row_dict) in enumerate(snake_case__ ): lowerCAmelCase , lowerCAmelCase = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE_ ( ) -> Dict: lowerCAmelCase = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() lowerCAmelCase = spark.range(1_0_0 ).repartition(1 ) lowerCAmelCase = Spark(snake_case__ ) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 1_0_0
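# A pure-Python sketch of the worker/shard split asserted above: with num_workers=2,
# worker 0 reads partitions [0, 2] and worker 1 reads partitions [1, 3] of a
# 4-partition dataframe, i.e. a round-robin assignment. This mirrors the behavior the
# tests expect of SparkExamplesIterable.shard_data_sources, not its implementation.
def round_robin_shards(worker_id, num_workers, num_partitions):
    return [p for p in range(num_partitions) if p % num_workers == worker_id]

assert round_robin_shards(worker_id=0, num_workers=2, num_partitions=4) == [0, 2]
assert round_robin_shards(worker_id=1, num_workers=2, num_partitions=4) == [1, 3]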
import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( '''split_dict''' , [ SplitDict(), SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name='''my_dataset''' )} ), SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 )} ), SplitDict({'''train''': SplitInfo()} ), ] , ) def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]: lowerCAmelCase = split_dict._to_yaml_list() assert len(snake_case__ ) == len(snake_case__ ) lowerCAmelCase = SplitDict._from_yaml_list(snake_case__ ) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump lowerCAmelCase = None # the split name of split_dict takes over the name of the split info object lowerCAmelCase = split_name assert split_dict == reloaded @pytest.mark.parametrize( '''split_info''' , [SplitInfo(), SplitInfo(dataset_name=snake_case__ ), SplitInfo(dataset_name='''my_dataset''' )] ) def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[int]: # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name" # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files lowerCAmelCase = asdict(SplitDict({'''train''': split_info} ) ) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
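# Hedged usage sketch of the round trip under test: serialize a SplitDict to its
# YAML list form and reload it. _to_yaml_list/_from_yaml_list are the same private
# helpers the test calls; the values below are arbitrary.
from datasets.splits import SplitDict, SplitInfo

splits = SplitDict({"train": SplitInfo(name="train", num_bytes=1_337, num_examples=42)})
reloaded = SplitDict._from_yaml_list(splits._to_yaml_list())
assert reloaded["train"].num_examples == 42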
import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('''0.12.2'''): raise Exception('''requires fairseq >= 0.12.2''') if version.parse(fairseq.__version__) > version.parse('''2'''): raise Exception('''requires fairseq < v2''') logging.set_verbosity_info() lowercase__ : Any = logging.get_logger(__name__) lowercase__ : Any = '''Hello, World!''' lowercase__ : int = '''en_XX''' def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> int: lowerCAmelCase = Path('''data_bin''' ) lowerCAmelCase = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(snake_case__ ).parent ) , checkpoint_file=Path(snake_case__ ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(snake_case__ ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(snake_case__ ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , ) xmod.eval() # disable dropout print(snake_case__ ) lowerCAmelCase = xmod.model.encoder.sentence_encoder lowerCAmelCase = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , ) if classification_head: lowerCAmelCase = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0] print('''Our X-MOD config:''' , snake_case__ ) lowerCAmelCase = XmodForSequenceClassification(snake_case__ ) if classification_head else XmodForMaskedLM(snake_case__ ) model.eval() # Now let's copy all the weights. # Embeddings lowerCAmelCase = xmod_sent_encoder.embed_tokens.weight lowerCAmelCase = xmod_sent_encoder.embed_positions.weight lowerCAmelCase = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them. 
lowerCAmelCase = xmod_sent_encoder.layernorm_embedding.weight lowerCAmelCase = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer lowerCAmelCase = model.roberta.encoder.layer[i] lowerCAmelCase = xmod_sent_encoder.layers[i] # self attention lowerCAmelCase = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ): raise AssertionError('''Dimensions of self-attention weights do not match.''' ) lowerCAmelCase = xmod_layer.self_attn.q_proj.weight lowerCAmelCase = xmod_layer.self_attn.q_proj.bias lowerCAmelCase = xmod_layer.self_attn.k_proj.weight lowerCAmelCase = xmod_layer.self_attn.k_proj.bias lowerCAmelCase = xmod_layer.self_attn.v_proj.weight lowerCAmelCase = xmod_layer.self_attn.v_proj.bias # self-attention output lowerCAmelCase = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError('''Dimensions of self-attention output weights do not match.''' ) lowerCAmelCase = xmod_layer.self_attn.out_proj.weight lowerCAmelCase = xmod_layer.self_attn.out_proj.bias lowerCAmelCase = xmod_layer.self_attn_layer_norm.weight lowerCAmelCase = xmod_layer.self_attn_layer_norm.bias # intermediate lowerCAmelCase = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError('''Dimensions of intermediate weights do not match.''' ) lowerCAmelCase = xmod_layer.fca.weight lowerCAmelCase = xmod_layer.fca.bias # output lowerCAmelCase = layer.output if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError('''Dimensions of feed-forward weights do not match.''' ) lowerCAmelCase = xmod_layer.fca.weight lowerCAmelCase = xmod_layer.fca.bias lowerCAmelCase = xmod_layer.final_layer_norm.weight lowerCAmelCase = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: lowerCAmelCase = xmod_layer.adapter_layer_norm.weight lowerCAmelCase = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ): raise AssertionError('''Lists of language adapters do not match.''' ) for lang_code, adapter in xmod_layer.adapter_modules.items(): lowerCAmelCase = bert_output.adapter_modules[lang_code] lowerCAmelCase = xmod_layer.adapter_modules[lang_code] lowerCAmelCase = from_adapter.fca.weight lowerCAmelCase = from_adapter.fca.bias lowerCAmelCase = from_adapter.fca.weight lowerCAmelCase = from_adapter.fca.bias # end of layer if xmod_sent_encoder.layer_norm is not None: lowerCAmelCase = xmod_sent_encoder.layer_norm.weight lowerCAmelCase = xmod_sent_encoder.layer_norm.bias if classification_head: lowerCAmelCase = xmod.model.classification_heads['''mnli'''].dense.weight lowerCAmelCase = xmod.model.classification_heads['''mnli'''].dense.bias lowerCAmelCase = xmod.model.classification_heads['''mnli'''].out_proj.weight lowerCAmelCase = xmod.model.classification_heads['''mnli'''].out_proj.bias else: # LM Head lowerCAmelCase = xmod.model.encoder.lm_head.dense.weight lowerCAmelCase = xmod.model.encoder.lm_head.dense.bias lowerCAmelCase = xmod.model.encoder.lm_head.layer_norm.weight lowerCAmelCase = xmod.model.encoder.lm_head.layer_norm.bias lowerCAmelCase = xmod.model.encoder.lm_head.weight lowerCAmelCase = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. 
lowerCAmelCase = xmod.encode(snake_case__ ).unsqueeze(0 ) # batch of size 1 model.roberta.set_default_language(snake_case__ ) lowerCAmelCase = model(snake_case__ )[0] if classification_head: lowerCAmelCase = xmod.model.classification_heads['''mnli'''](xmod.extract_features(snake_case__ ) ) else: lowerCAmelCase = xmod.model(snake_case__ , lang_id=[SAMPLE_LANGUAGE] )[0] print(our_output.shape , their_output.shape ) lowerCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item() print(f"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7 lowerCAmelCase = torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' ) if not success: raise Exception('''Something went wRoNg''' ) Path(snake_case__ ).mkdir(parents=snake_case__ , exist_ok=snake_case__ ) print(f"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.''' ) lowercase__ : List[Any] = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
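# Hedged usage sketch: once the conversion script has run (the invocation below is
# illustrative and the script name/dump path are hypothetical), the checkpoint loads
# like any other transformers model, and an adapter language must be selected first.
#
#   python convert_xmod_checkpoint.py \
#       --xmod_checkpoint_path /path/to/checkpoint.pt \
#       --pytorch_dump_folder_path ./xmod-base-converted
#
from transformers import XmodForMaskedLM

model = XmodForMaskedLM.from_pretrained("./xmod-base-converted")  # hypothetical path
model.set_default_language("en_XX")  # route inputs through the English adapter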
import unittest import numpy as np def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , ) -> np.ndarray: lowerCAmelCase = np.shape(snake_case__ ) lowerCAmelCase = np.shape(snake_case__ ) lowerCAmelCase = np.shape(snake_case__ ) if shape_a[0] != shape_b[0]: lowerCAmelCase = ( '''Expected the same number of rows for A and B. ''' f"Instead found A of size {shape_a} and B of size {shape_b}" ) raise ValueError(snake_case__ ) if shape_b[1] != shape_c[1]: lowerCAmelCase = ( '''Expected the same number of columns for B and C. ''' f"Instead found B of size {shape_b} and C of size {shape_c}" ) raise ValueError(snake_case__ ) lowerCAmelCase = pseudo_inv if a_inv is None: try: lowerCAmelCase = np.linalg.inv(snake_case__ ) except np.linalg.LinAlgError: raise ValueError( '''Input matrix A is not invertible. Cannot compute Schur complement.''' ) return mat_c - mat_b.T @ a_inv @ mat_b class lowercase_ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self ) ->None: lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] ) lowerCAmelCase = np.array([[2, 1], [6, 3]] ) lowerCAmelCase = schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = np.block([[a, b], [b.T, c]] ) lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE ) self.assertAlmostEqual(__SCREAMING_SNAKE_CASE , det_a * det_s ) def SCREAMING_SNAKE_CASE_ ( self ) ->None: lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] ) lowerCAmelCase = np.array([[2, 1], [6, 3]] ) with self.assertRaises(__SCREAMING_SNAKE_CASE ): schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->None: lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] ) lowerCAmelCase = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(__SCREAMING_SNAKE_CASE ): schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
import collections import os import re from pathlib import Path lowercase__ : List[Any] = '''src/transformers''' # Matches is_xxx_available() lowercase__ : Union[str, Any] = re.compile(R'''is\_([a-z_]*)_available()''') # Catches a one-line _import_struct = {xxx} lowercase__ : Union[str, Any] = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] lowercase__ : List[str] = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''') # Catches a line if not is_foo_available lowercase__ : Any = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''') # Catches a line _import_struct["bla"].append("foo") lowercase__ : List[str] = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] lowercase__ : Union[str, Any] = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''') # Catches a line with an object between quotes and a comma: "MyModel", lowercase__ : Any = re.compile(R'''^\s+"([^"]+)",''') # Catches a line with objects between brackets only: ["foo", "bar"], lowercase__ : Dict = re.compile(R'''^\s+\[([^\]]+)\]''') # Catches a line with from foo import bar, bla, boo lowercase__ : int = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') # Catches a line with try: lowercase__ : str = re.compile(R'''^\s*try:''') # Catches a line with else: lowercase__ : Any = re.compile(R'''^\s*else:''') def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[int]: if _re_test_backend.search(snake_case__ ) is None: return None lowerCAmelCase = [b[0] for b in _re_backend.findall(snake_case__ )] backends.sort() return "_and_".join(snake_case__ ) def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Tuple: with open(snake_case__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCAmelCase = f.readlines() lowerCAmelCase = 0 while line_index < len(snake_case__ ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(snake_case__ ): return None # First grab the objects without a specific backend in _import_structure lowerCAmelCase = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: lowerCAmelCase = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(snake_case__ ): lowerCAmelCase = _re_one_line_import_struct.search(snake_case__ ).groups()[0] lowerCAmelCase = re.findall(R'''\[([^\]]+)\]''' , snake_case__ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue lowerCAmelCase = _re_import_struct_key_value.search(snake_case__ ) if single_line_import_search is not None: lowerCAmelCase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(snake_case__ ) > 0] objects.extend(snake_case__ ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 lowerCAmelCase = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
lowerCAmelCase = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): lowerCAmelCase = lines[line_index] if _re_import_struct_add_one.search(snake_case__ ) is not None: objects.append(_re_import_struct_add_one.search(snake_case__ ).groups()[0] ) elif _re_import_struct_add_many.search(snake_case__ ) is not None: lowerCAmelCase = _re_import_struct_add_many.search(snake_case__ ).groups()[0].split(''', ''' ) lowerCAmelCase = [obj[1:-1] for obj in imports if len(snake_case__ ) > 0] objects.extend(snake_case__ ) elif _re_between_brackets.search(snake_case__ ) is not None: lowerCAmelCase = _re_between_brackets.search(snake_case__ ).groups()[0].split(''', ''' ) lowerCAmelCase = [obj[1:-1] for obj in imports if len(snake_case__ ) > 0] objects.extend(snake_case__ ) elif _re_quote_object.search(snake_case__ ) is not None: objects.append(_re_quote_object.search(snake_case__ ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 1_2 + '''"''' ): objects.append(line[1_3:-3] ) line_index += 1 lowerCAmelCase = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend lowerCAmelCase = [] while ( line_index < len(snake_case__ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): lowerCAmelCase = lines[line_index] lowerCAmelCase = _re_import.search(snake_case__ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 lowerCAmelCase = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(snake_case__ ): # If the line is an if is_backend_available, we grab all objects associated. 
lowerCAmelCase = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): lowerCAmelCase = lines[line_index] lowerCAmelCase = _re_import.search(snake_case__ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 1_2 ): objects.append(line[1_2:-2] ) line_index += 1 lowerCAmelCase = objects else: line_index += 1 return import_dict_objects, type_hint_objects def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> int: def find_duplicates(snake_case__ ): return [k for k, v in collections.Counter(snake_case__ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] lowerCAmelCase = [] for key in import_dict_objects.keys(): lowerCAmelCase = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}" ) lowerCAmelCase = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): lowerCAmelCase = '''base imports''' if key == '''none''' else f"{key} backend" errors.append(f"Differences for {name}:" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f" {a} in TYPE_HINT but not in _import_structure." ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f" {a} in _import_structure but not in TYPE_HINT." 
) return errors def SCREAMING_SNAKE_CASE_ ( ) -> int: lowerCAmelCase = [] for root, _, files in os.walk(snake_case__ ): if "__init__.py" in files: lowerCAmelCase = os.path.join(snake_case__ , '''__init__.py''' ) lowerCAmelCase = parse_init(snake_case__ ) if objects is not None: lowerCAmelCase = analyze_results(*snake_case__ ) if len(snake_case__ ) > 0: lowerCAmelCase = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}" failures.append('''\n'''.join(snake_case__ ) ) if len(snake_case__ ) > 0: raise ValueError('''\n\n'''.join(snake_case__ ) ) def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]: lowerCAmelCase = [] for path, directories, files in os.walk(snake_case__ ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(snake_case__ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(snake_case__ ) / folder).glob('''*.py''' ) ) ) == 0: continue lowerCAmelCase = str((Path(snake_case__ ) / folder).relative_to(snake_case__ ) ) lowerCAmelCase = short_path.replace(os.path.sep , '''.''' ) submodules.append(snake_case__ ) for fname in files: if fname == "__init__.py": continue lowerCAmelCase = str((Path(snake_case__ ) / fname).relative_to(snake_case__ ) ) lowerCAmelCase = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(snake_case__ ) return submodules lowercase__ : int = [ '''convert_pytorch_checkpoint_to_tf2''', '''modeling_flax_pytorch_utils''', '''models.esm.openfold_utils''', ] def SCREAMING_SNAKE_CASE_ ( ) -> Tuple: # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import lowerCAmelCase = direct_transformers_import(snake_case__ ) lowerCAmelCase = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentially re-) add them. with open(os.path.join(snake_case__ , '''__init__.py''' ) , '''r''' ) as f: lowerCAmelCase = f.read() import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , snake_case__ ) ) ) lowerCAmelCase = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(snake_case__ ) > 0: lowerCAmelCase = '''\n'''.join(f"- {module}" for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' f"{list_of_modules}\n" '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
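# Small illustration of the backend-detection idea above: an
# `if not is_xxx_available():` guard line maps to its backend name(s), joined with
# "_and_" when several are combined. The regex here is a simplified stand-in for
# the module-level patterns, shown for clarity only.
import re

_backend_re = re.compile(r"is_([a-z_]*)_available\(\)")
line = "    if not is_torch_available() and not is_flax_available():"
backends = sorted(_backend_re.findall(line))
assert "_and_".join(backends) == "flax_and_torch"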
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration lowercase__ : Any = { '''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''', '''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''', '''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''', '''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''', '''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''', '''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''', '''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''', '''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''', '''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''', '''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''', } def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str: lowerCAmelCase = ['''layers''', '''blocks'''] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) lowercase__ : List[Any] = { '''blocks''': '''layers''', '''mlp.0''': '''fc1''', '''mlp.2''': '''fc2''', '''mlp_ln''': '''final_layer_norm''', '''.attn.query''': '''.self_attn.q_proj''', '''.attn.key''': '''.self_attn.k_proj''', '''.attn.value''': '''.self_attn.v_proj''', '''.attn_ln''': '''.self_attn_layer_norm''', '''.attn.out''': '''.self_attn.out_proj''', '''.cross_attn.query''': '''.encoder_attn.q_proj''', '''.cross_attn.key''': '''.encoder_attn.k_proj''', '''.cross_attn.value''': '''.encoder_attn.v_proj''', '''.cross_attn_ln''': '''.encoder_attn_layer_norm''', '''.cross_attn.out''': '''.encoder_attn.out_proj''', '''decoder.ln.''': '''decoder.layer_norm.''', '''encoder.ln.''': '''encoder.layer_norm.''', '''token_embedding''': '''embed_tokens''', '''encoder.positional_embedding''': '''encoder.embed_positions.weight''', '''decoder.positional_embedding''': '''decoder.embed_positions.weight''', '''ln_post''': '''layer_norm''', } def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]: lowerCAmelCase = list(s_dict.keys() ) for key in keys: lowerCAmelCase = key for k, v in WHISPER_MAPPING.items(): if k in key: lowerCAmelCase = new_key.replace(snake_case__ , snake_case__ ) print(f"{key} -> {new_key}" ) lowerCAmelCase = s_dict.pop(snake_case__ ) return s_dict def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]: lowerCAmelCase , lowerCAmelCase = emb.weight.shape lowerCAmelCase = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) lowerCAmelCase = emb.weight.data return lin_layer def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> bytes: os.makedirs(snake_case__ , exist_ok=snake_case__ ) 
lowerCAmelCase = os.path.basename(snake_case__ ) lowerCAmelCase = url.split('''/''' )[-2] lowerCAmelCase = os.path.join(snake_case__ , snake_case__ ) if os.path.exists(snake_case__ ) and not os.path.isfile(snake_case__ ): raise RuntimeError(f"{download_target} exists and is not a regular file" ) if os.path.isfile(snake_case__ ): lowerCAmelCase = open(snake_case__ , '''rb''' ).read() if hashlib.sha256(snake_case__ ).hexdigest() == expected_sha256: return model_bytes else: warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" ) with urllib.request.urlopen(snake_case__ ) as source, open(snake_case__ , '''wb''' ) as output: with tqdm( total=int(source.info().get('''Content-Length''' ) ) , ncols=8_0 , unit='''iB''' , unit_scale=snake_case__ , unit_divisor=1_0_2_4 ) as loop: while True: lowerCAmelCase = source.read(8_1_9_2 ) if not buffer: break output.write(snake_case__ ) loop.update(len(snake_case__ ) ) lowerCAmelCase = open(snake_case__ , '''rb''' ).read() if hashlib.sha256(snake_case__ ).hexdigest() != expected_sha256: raise RuntimeError( '''Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.''' ) return model_bytes def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> str: if ".pt" not in checkpoint_path: lowerCAmelCase = _download(_MODELS[checkpoint_path] ) else: lowerCAmelCase = torch.load(snake_case__ , map_location='''cpu''' ) lowerCAmelCase = original_checkpoint['''dims'''] lowerCAmelCase = original_checkpoint['''model_state_dict'''] lowerCAmelCase = state_dict['''decoder.token_embedding.weight'''] remove_ignore_keys_(snake_case__ ) rename_keys(snake_case__ ) lowerCAmelCase = True lowerCAmelCase = state_dict['''decoder.layers.0.fc1.weight'''].shape[0] lowerCAmelCase = WhisperConfig( vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=snake_case__ , decoder_ffn_dim=snake_case__ , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_head'''] , max_source_positions=dimensions['''n_audio_ctx'''] , ) lowerCAmelCase = WhisperForConditionalGeneration(snake_case__ ) lowerCAmelCase , lowerCAmelCase = model.model.load_state_dict(snake_case__ , strict=snake_case__ ) if len(snake_case__ ) > 0 and not set(snake_case__ ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,''' f" but all the following weights are missing {missing}" ) if tie_embeds: lowerCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens ) else: lowerCAmelCase = proj_out_weights model.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase__ : List[str] = argparse.ArgumentParser() # # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') lowercase__ : int = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
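# Hedged usage sketch: the converter accepts either a key from _MODELS above
# (e.g. "tiny.en", downloaded and checksum-verified on demand) or a local .pt path.
# The script name and dump path in the invocation below are illustrative only.
#
#   python convert_whisper_checkpoint.py --checkpoint_path tiny.en \
#       --pytorch_dump_folder_path ./whisper-tiny.en-converted
#
from transformers import WhisperForConditionalGeneration

model = WhisperForConditionalGeneration.from_pretrained("./whisper-tiny.en-converted")  # hypothetical path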
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowercase__ : List[Any] = logging.get_logger(__name__) lowercase__ : Optional[Any] = {'''vocab_file''': '''spiece.model'''} lowercase__ : Optional[int] = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', } } lowercase__ : Any = { '''albert-base-v1''': 5_1_2, '''albert-large-v1''': 5_1_2, '''albert-xlarge-v1''': 5_1_2, '''albert-xxlarge-v1''': 5_1_2, '''albert-base-v2''': 5_1_2, '''albert-large-v2''': 5_1_2, '''albert-xlarge-v2''': 5_1_2, '''albert-xxlarge-v2''': 5_1_2, } lowercase__ : Tuple = '''▁''' class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : Dict = VOCAB_FILES_NAMES UpperCAmelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) ->None: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
lowerCAmelCase = ( AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE , normalized=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token ) lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) lowerCAmelCase = do_lower_case lowerCAmelCase = remove_space lowerCAmelCase = keep_accents lowerCAmelCase = vocab_file lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__SCREAMING_SNAKE_CASE ) @property def SCREAMING_SNAKE_CASE_ ( self ) ->Any: return len(self.sp_model ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: lowerCAmelCase = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) ->int: lowerCAmelCase = self.__dict__.copy() lowerCAmelCase = None return state def __setstate__( self , __SCREAMING_SNAKE_CASE ) ->Tuple: lowerCAmelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowerCAmelCase = {} lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Any: if self.remove_space: lowerCAmelCase = ''' '''.join(inputs.strip().split() ) else: lowerCAmelCase = inputs lowerCAmelCase = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' ) if not self.keep_accents: lowerCAmelCase = unicodedata.normalize('''NFKD''' , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = ''''''.join([c for c in outputs if not unicodedata.combining(__SCREAMING_SNAKE_CASE )] ) if self.do_lower_case: lowerCAmelCase = outputs.lower() return outputs def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->List[str]: lowerCAmelCase = self.preprocess_text(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = [] for piece in pieces: if len(__SCREAMING_SNAKE_CASE ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit(): lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(__SCREAMING_SNAKE_CASE , '''''' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowerCAmelCase = cur_pieces[1:] else: lowerCAmelCase = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__SCREAMING_SNAKE_CASE ) else: new_pieces.append(__SCREAMING_SNAKE_CASE ) return new_pieces def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->int: return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->int: return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Optional[int]: lowerCAmelCase = [] lowerCAmelCase = '''''' lowerCAmelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " 
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token lowerCAmelCase = True lowerCAmelCase = [] else: current_sub_tokens.append(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = False out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) return out_string.strip() def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->List[int]: lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ) ->List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE ) if token_ids_a is not None: return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->List[int]: lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->Tuple[str]: if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return lowerCAmelCase = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi: lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
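# Hedged usage sketch of the SentencePiece tokenizer defined above, via one of the
# published checkpoints in PRETRAINED_VOCAB_FILES_MAP. Requires `sentencepiece`.
from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
ids = tokenizer("lower newer", "another sequence")["input_ids"]
# build_inputs_with_special_tokens wraps a pair as [CLS] A [SEP] B [SEP]
assert ids[0] == tokenizer.cls_token_id and ids[-1] == tokenizer.sep_token_id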
from ...processing_utils import ProcessorMixin class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = ["""image_processor""", """feature_extractor"""] UpperCAmelCase_ : Optional[int] = """TvltImageProcessor""" UpperCAmelCase_ : Optional[int] = """TvltFeatureExtractor""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Optional[int]: super().__init__(image_processor=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = image_processor lowerCAmelCase = feature_extractor def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) ->List[Any]: if images is None and audio is None: raise ValueError('''You need to specify either an `images` or `audio` input to process.''' ) lowerCAmelCase = None if images is not None: lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , mask_pixel=__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if images_mixed is not None: lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , is_mixed=__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if audio is not None: lowerCAmelCase = self.feature_extractor( __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , mask_audio=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) lowerCAmelCase = {} if audio is not None: output_dict.update(__SCREAMING_SNAKE_CASE ) if images is not None: output_dict.update(__SCREAMING_SNAKE_CASE ) if images_mixed_dict is not None: output_dict.update(__SCREAMING_SNAKE_CASE ) return output_dict @property def SCREAMING_SNAKE_CASE_ ( self ) ->Any: lowerCAmelCase = self.image_processor.model_input_names lowerCAmelCase = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
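# Hedged sketch of calling the processor above with both modalities. The checkpoint
# name and toy input shapes are assumptions based on the public TVLT docs, not
# taken from this file.
import numpy as np
from transformers import TvltProcessor

processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")  # assumed checkpoint
images = list(np.random.randn(8, 3, 224, 224))  # one video as a list of 8 RGB frames
audio = list(np.random.randn(10_000))           # one mono waveform
inputs = processor(images=images, audio=audio, sampling_rate=44_100, return_tensors="pt")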
from collections.abc import Sequence def SCREAMING_SNAKE_CASE_ ( snake_case__ = None ) -> int: if nums is None or not nums: raise ValueError('''Input sequence should not be empty''' ) lowerCAmelCase = nums[0] for i in range(1 , len(snake_case__ ) ): lowerCAmelCase = nums[i] lowerCAmelCase = max(snake_case__ , ans + num , snake_case__ ) return ans if __name__ == "__main__": import doctest doctest.testmod() # Try on a sample input from the user lowercase__ : int = int(input('''Enter number of elements : ''').strip()) lowercase__ : Union[str, Any] = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n] print(max_subsequence_sum(array))
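# Worked example of the scan above (Kadane's algorithm): the best sum ending at each
# element is max(num, ans + num), and the answer keeps the running maximum. A
# self-contained mirror, since the definition above has auto-mangled names.
def kadane(nums):
    best = cur = nums[0]
    for num in nums[1:]:
        cur = max(num, cur + num)  # extend the current run or start a new one
        best = max(best, cur)
    return best

assert kadane([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6  # best subarray is [4, -1, 2, 1]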
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> List[str]: lowerCAmelCase = len(snake_case__ ) for i in range(length - 1 ): lowerCAmelCase = i for k in range(i + 1 , snake_case__ ): if collection[k] < collection[least]: lowerCAmelCase = k if least != i: lowerCAmelCase , lowerCAmelCase = (collection[i], collection[least]) return collection if __name__ == "__main__": lowercase__ : Optional[int] = input('''Enter numbers separated by a comma:\n''').strip() lowercase__ : str = [int(item) for item in user_input.split(''',''')] print(selection_sort(unsorted))
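# Short trace of selection sort on [64, 25, 12, 22, 11]: each pass swaps the minimum
# of the unsorted suffix into place:
#   [11, 25, 12, 22, 64] -> [11, 12, 25, 22, 64] -> [11, 12, 22, 25, 64] (sorted)
# A self-contained mirror, since the definition above has auto-mangled names.
def selection_sort_demo(collection):
    for i in range(len(collection) - 1):
        least = min(range(i, len(collection)), key=collection.__getitem__)
        collection[i], collection[least] = collection[least], collection[i]
    return collection

assert selection_sort_demo([64, 25, 12, 22, 11]) == [11, 12, 22, 25, 64]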
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.esm.modeling_esmfold import EsmForProteinFolding


class lowercase_ :
    """simple docstring"""

    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=19 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.0_2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , ) ->Union[str, Any]:
        lowerCAmelCase = parent
        lowerCAmelCase = batch_size
        lowerCAmelCase = seq_length
        lowerCAmelCase = is_training
        lowerCAmelCase = use_input_mask
        lowerCAmelCase = use_token_type_ids
        lowerCAmelCase = use_labels
        lowerCAmelCase = vocab_size
        lowerCAmelCase = hidden_size
        lowerCAmelCase = num_hidden_layers
        lowerCAmelCase = num_attention_heads
        lowerCAmelCase = intermediate_size
        lowerCAmelCase = hidden_act
        lowerCAmelCase = hidden_dropout_prob
        lowerCAmelCase = attention_probs_dropout_prob
        lowerCAmelCase = max_position_embeddings
        lowerCAmelCase = type_vocab_size
        lowerCAmelCase = type_sequence_label_size
        lowerCAmelCase = initializer_range
        lowerCAmelCase = num_labels
        lowerCAmelCase = num_choices
        lowerCAmelCase = scope

    def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
        lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCAmelCase = None
        if self.use_input_mask:
            lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCAmelCase = None
        lowerCAmelCase = None
        lowerCAmelCase = None
        if self.use_labels:
            lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
        lowerCAmelCase = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
        lowerCAmelCase = EsmConfig(
            vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__SCREAMING_SNAKE_CASE , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
        return config

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Tuple:
        lowerCAmelCase = EsmForProteinFolding(config=__SCREAMING_SNAKE_CASE ).float()
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
        self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )

    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        lowerCAmelCase = self.prepare_config_and_inputs()
        (
            (lowerCAmelCase) ,
            (lowerCAmelCase) ,
            (lowerCAmelCase) ,
            (lowerCAmelCase) ,
            (lowerCAmelCase) ,
            (lowerCAmelCase) ,
        ) = config_and_inputs
        lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


@require_torch
class lowercase_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    """simple docstring"""

    UpperCAmelCase_ : Optional[Any] = False
    UpperCAmelCase_ : Dict = (EsmForProteinFolding,) if is_torch_available() else ()
    UpperCAmelCase_ : List[Any] = ()
    UpperCAmelCase_ : Tuple = {} if is_torch_available() else {}
    UpperCAmelCase_ : List[str] = False

    def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        lowerCAmelCase = EsmFoldModelTester(self )
        lowerCAmelCase = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )

    @unittest.skip('''Does not support attention outputs''' )
    def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
        pass

    @unittest.skip
    def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
        pass

    @unittest.skip('''Esm does not support embedding resizing''' )
    def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
        pass

    @unittest.skip('''Esm does not support embedding resizing''' )
    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        pass

    @unittest.skip('''ESMFold does not support passing input embeds!''' )
    def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        pass

    @unittest.skip('''ESMFold does not support head pruning.''' )
    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        pass

    @unittest.skip('''ESMFold does not support head pruning.''' )
    def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        pass

    @unittest.skip('''ESMFold does not support head pruning.''' )
    def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
        pass

    @unittest.skip('''ESMFold does not support head pruning.''' )
    def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        pass

    @unittest.skip('''ESMFold does not support head pruning.''' )
    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
        pass

    @unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
    def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
        pass

    @unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
    def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        pass

    @unittest.skip('''ESMFold only has one output format.''' )
    def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
        pass

    @unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
    def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        pass

    @unittest.skip('''ESMFold does not support input chunking.''' )
    def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
        pass

    @unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
        pass

    @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        pass

    @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
    def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
        pass

    @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        pass

    @unittest.skip('''ESMFold doesn\'t support data parallel.''' )
    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
        pass


@require_torch
class lowercase_ ( UpperCamelCase_ ):
    """simple docstring"""

    @slow
    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        lowerCAmelCase = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
        model.eval()
        lowerCAmelCase = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )['''positions''']
        lowerCAmelCase = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] , dtype=torch.floataa )
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
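# A minimal inference sketch mirroring the integration test above; it assumes the
# released ``facebook/esmfold_v1`` checkpoint and its matching tokenizer are
# downloadable, and simply folds one short (hypothetical) protein sequence.
import torch
from transformers import AutoTokenizer, EsmForProteinFolding

def fold_one_sequence(sequence: str = "MLKNV"):
    tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
    model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
    model.eval()
    inputs = tokenizer([sequence], return_tensors="pt", add_special_tokens=False)
    with torch.no_grad():
        outputs = model(**inputs)
    # ``positions`` carries per-recycle atom coordinates, as asserted in the test
    return outputs["positions"]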
338
1
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class lowercase_ ( UpperCamelCase_ ):
    """simple docstring"""

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->str:
        with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as input_file:
            lowerCAmelCase = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' )
            lowerCAmelCase = input_file.read()
            lowerCAmelCase = regexp.search(__SCREAMING_SNAKE_CASE )
        return match

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->List[Any]:
        with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as input_file:
            lowerCAmelCase = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL )
            lowerCAmelCase = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            lowerCAmelCase = regexp.finditer(__SCREAMING_SNAKE_CASE )
            lowerCAmelCase = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None

    def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
        lowerCAmelCase = Path('''./datasets''' )
        lowerCAmelCase = list(dataset_paths.absolute().glob('''**/*.py''' ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(__SCREAMING_SNAKE_CASE ) ):
                raise AssertionError(F"open(...) must use utf-8 encoding in {dataset}" )

    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        lowerCAmelCase = Path('''./datasets''' )
        lowerCAmelCase = list(dataset_paths.absolute().glob('''**/*.py''' ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(__SCREAMING_SNAKE_CASE ) ):
                raise AssertionError(F"print statement found in {dataset}. Use datasets.logger/logging instead." )
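# A quick demonstration of the first regular expression above: it flags bare
# ``open(...)`` calls that do not mention an ``encoding`` (or a binary mode)
# anywhere on the rest of the line.
import re

NO_ENCODING = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")

assert NO_ENCODING.search(" open('data.txt')") is not None               # flagged
assert NO_ENCODING.search(" open('data.txt', encoding='utf-8')") is None  # accepted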
338
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class lowercase_ ( UpperCamelCase_ ):
    """simple docstring"""

    UpperCAmelCase_ : List[str] = ["""image_processor""", """tokenizer"""]
    UpperCAmelCase_ : int = """OwlViTImageProcessor"""
    UpperCAmelCase_ : Any = ("""CLIPTokenizer""", """CLIPTokenizerFast""")

    def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->Any:
        lowerCAmelCase = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , __SCREAMING_SNAKE_CASE , )
            lowerCAmelCase = kwargs.pop('''feature_extractor''' )
        lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

    def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="max_length" , __SCREAMING_SNAKE_CASE="np" , **__SCREAMING_SNAKE_CASE ) ->int:
        if text is None and query_images is None and images is None:
            raise ValueError(
                '''You have to specify at least one text or query image or image. All three cannot be none.''' )
        if text is not None:
            if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or (isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not isinstance(text[0] , __SCREAMING_SNAKE_CASE )):
                lowerCAmelCase = [self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )]
            elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(text[0] , __SCREAMING_SNAKE_CASE ):
                lowerCAmelCase = []
                # Maximum number of queries across batch
                lowerCAmelCase = max([len(__SCREAMING_SNAKE_CASE ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(__SCREAMING_SNAKE_CASE ) != max_num_queries:
                        lowerCAmelCase = t + [''' '''] * (max_num_queries - len(__SCREAMING_SNAKE_CASE ))
                    lowerCAmelCase = self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
                    encodings.append(__SCREAMING_SNAKE_CASE )
            else:
                raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )
            if return_tensors == "np":
                lowerCAmelCase = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
                lowerCAmelCase = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                lowerCAmelCase = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
                lowerCAmelCase = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch

                lowerCAmelCase = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 )
                lowerCAmelCase = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                lowerCAmelCase = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
                lowerCAmelCase = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
            else:
                raise ValueError('''Target return tensor type could not be returned''' )
            lowerCAmelCase = BatchEncoding()
            lowerCAmelCase = input_ids
            lowerCAmelCase = attention_mask
        if query_images is not None:
            lowerCAmelCase = BatchEncoding()
            lowerCAmelCase = self.image_processor(
                __SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).pixel_values
            lowerCAmelCase = query_pixel_values
        if images is not None:
            lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        if text is not None and images is not None:
            lowerCAmelCase = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            lowerCAmelCase = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**__SCREAMING_SNAKE_CASE ) , tensor_type=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Optional[int]:
        return self.image_processor.post_process(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Any:
        return self.image_processor.post_process_object_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Tuple:
        return self.image_processor.post_process_image_guided_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->str:
        return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
        return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )

    @property
    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __SCREAMING_SNAKE_CASE , )
        return self.image_processor_class

    @property
    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __SCREAMING_SNAKE_CASE , )
        return self.image_processor
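# A minimal usage sketch for a processor shaped like the one above; the
# ``google/owlvit-base-patch32`` checkpoint name is an assumption, and the image
# is synthetic so the snippet stays self-contained.
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.new("RGB", (224, 224))
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
# text queries are padded to the longest query set in the batch, as implemented above
print(inputs["input_ids"].shape, inputs["pixel_values"].shape)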
338
1
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(UpperCamelCase_ ) , """Tatoeba directory does not exist.""" )
class lowercase_ ( unittest.TestCase ):
    """simple docstring"""

    @cached_property
    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        lowerCAmelCase = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=__SCREAMING_SNAKE_CASE )

    @slow
    def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        self.resolver.convert_models(['''heb-eng'''] )

    @slow
    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        lowerCAmelCase , lowerCAmelCase = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=__SCREAMING_SNAKE_CASE )
        assert mmeta["long_pair"] == "heb-eng"
338
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


lowercase__ : List[Any] = logging.get_logger(__name__)

lowercase__ : Optional[Any] = {'''vocab_file''': '''spiece.model'''}

lowercase__ : Optional[int] = {
    '''vocab_file''': {
        '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
        '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
        '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
        '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
        '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
        '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
        '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
        '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
    }
}

lowercase__ : Any = {
    '''albert-base-v1''': 5_1_2,
    '''albert-large-v1''': 5_1_2,
    '''albert-xlarge-v1''': 5_1_2,
    '''albert-xxlarge-v1''': 5_1_2,
    '''albert-base-v2''': 5_1_2,
    '''albert-large-v2''': 5_1_2,
    '''albert-xlarge-v2''': 5_1_2,
    '''albert-xxlarge-v2''': 5_1_2,
}

lowercase__ : Tuple = '''▁'''


class lowercase_ ( UpperCamelCase_ ):
    """simple docstring"""

    UpperCAmelCase_ : Dict = VOCAB_FILES_NAMES
    UpperCAmelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) ->None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        lowerCAmelCase = (
            AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE , normalized=__SCREAMING_SNAKE_CASE )
            if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            else mask_token
        )
        lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
        lowerCAmelCase = do_lower_case
        lowerCAmelCase = remove_space
        lowerCAmelCase = keep_accents
        lowerCAmelCase = vocab_file
        lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(__SCREAMING_SNAKE_CASE )

    @property
    def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
        return len(self.sp_model )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
        lowerCAmelCase = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ) ->int:
        lowerCAmelCase = self.__dict__.copy()
        lowerCAmelCase = None
        return state

    def __setstate__( self , __SCREAMING_SNAKE_CASE ) ->Tuple:
        lowerCAmelCase = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            lowerCAmelCase = {}
        lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Any:
        if self.remove_space:
            lowerCAmelCase = ''' '''.join(inputs.strip().split() )
        else:
            lowerCAmelCase = inputs
        lowerCAmelCase = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
        if not self.keep_accents:
            lowerCAmelCase = unicodedata.normalize('''NFKD''' , __SCREAMING_SNAKE_CASE )
            lowerCAmelCase = ''''''.join([c for c in outputs if not unicodedata.combining(__SCREAMING_SNAKE_CASE )] )
        if self.do_lower_case:
            lowerCAmelCase = outputs.lower()
        return outputs

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->List[str]:
        lowerCAmelCase = self.preprocess_text(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = []
        for piece in pieces:
            if len(__SCREAMING_SNAKE_CASE ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
                lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(__SCREAMING_SNAKE_CASE , '''''' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        lowerCAmelCase = cur_pieces[1:]
                    else:
                        lowerCAmelCase = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(__SCREAMING_SNAKE_CASE )
            else:
                new_pieces.append(__SCREAMING_SNAKE_CASE )
        return new_pieces

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->int:
        return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->int:
        return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Optional[int]:
        lowerCAmelCase = []
        lowerCAmelCase = ''''''
        lowerCAmelCase = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
                lowerCAmelCase = True
                lowerCAmelCase = []
            else:
                current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
                lowerCAmelCase = False
        out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
        return out_string.strip()

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->List[int]:
        lowerCAmelCase = [self.sep_token_id]
        lowerCAmelCase = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ) ->List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
        if token_ids_a is not None:
            return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
        return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->List[int]:
        lowerCAmelCase = [self.sep_token_id]
        lowerCAmelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->Tuple[str]:
        if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        lowerCAmelCase = os.path.join(
            __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
        elif not os.path.isfile(self.vocab_file ):
            with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
                lowerCAmelCase = self.sp_model.serialized_model_proto()
                fi.write(__SCREAMING_SNAKE_CASE )
        return (out_vocab_file,)
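# A short round-trip sketch for the SentencePiece tokenizer above, assuming the
# public ``albert-base-v2`` checkpoint; token IDs are illustrative, not pinned.
from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
ids = tokenizer.encode("lower newer")                  # adds [CLS] ... [SEP]
tokens = tokenizer.convert_ids_to_tokens(ids)
assert tokens[0] == "[CLS]" and tokens[-1] == "[SEP]"
print(tokenizer.decode(ids, skip_special_tokens=True))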
338
1
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


lowercase__ : Union[str, Any] = logging.get_logger(__name__)

lowercase__ : Tuple = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}

lowercase__ : str = {
    '''vocab_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
    },
    '''merges_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
    },
    '''tokenizer_config_file''': {
        '''facebook/blenderbot_small-90M''': (
            '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
        )
    },
}

lowercase__ : Union[str, Any] = {
    '''facebook/blenderbot_small-90M''': 5_1_2,
}


class lowercase_ ( UpperCamelCase_ ):
    """simple docstring"""

    UpperCAmelCase_ : Tuple = VOCAB_FILES_NAMES
    UpperCAmelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase_ : Any = BlenderbotSmallTokenizer

    def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<|endoftext|>" , __SCREAMING_SNAKE_CASE="<|endoftext|>" , __SCREAMING_SNAKE_CASE="<|endoftext|>" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ) ->str:
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=__SCREAMING_SNAKE_CASE , merges=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE , ) , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
        lowerCAmelCase = add_prefix_space

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->List[str]:
        lowerCAmelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->List[int]:
        lowerCAmelCase = [self.sep_token_id]
        lowerCAmelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
338
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class lowercase_ ( UpperCamelCase_ ):
    """simple docstring"""

    UpperCAmelCase_ : List[Any] = (DEISMultistepScheduler,)
    UpperCAmelCase_ : int = (("""num_inference_steps""", 25),)

    def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->str:
        lowerCAmelCase = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0_0_0_1,
            '''beta_end''': 0.0_2,
            '''beta_schedule''': '''linear''',
            '''solver_order''': 2,
        }
        config.update(**__SCREAMING_SNAKE_CASE )
        return config

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ) ->Tuple:
        lowerCAmelCase = dict(self.forward_default_kwargs )
        lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )
        lowerCAmelCase = self.dummy_sample
        lowerCAmelCase = 0.1 * sample
        lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
            lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
            scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
            # copy over dummy past residuals
            lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__SCREAMING_SNAKE_CASE )
                lowerCAmelCase = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
                new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
                # copy over dummy past residuals
                lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
            lowerCAmelCase , lowerCAmelCase = sample, sample
            for t in range(__SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
                lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
                lowerCAmelCase = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
        pass

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
        lowerCAmelCase = dict(self.forward_default_kwargs )
        lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )
        lowerCAmelCase = self.dummy_sample
        lowerCAmelCase = 0.1 * sample
        lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            lowerCAmelCase = self.get_scheduler_config()
            lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
            scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
            # copy over dummy past residuals (must be after setting timesteps)
            lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__SCREAMING_SNAKE_CASE )
                lowerCAmelCase = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
                # copy over dummy past residual (must be after setting timesteps)
                lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
            lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
            lowerCAmelCase = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
        if scheduler is None:
            lowerCAmelCase = self.scheduler_classes[0]
            lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
            lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = 10
        lowerCAmelCase = self.dummy_model()
        lowerCAmelCase = self.dummy_sample_deter
        scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
        for i, t in enumerate(scheduler.timesteps ):
            lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
        return sample

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
        lowerCAmelCase = dict(self.forward_default_kwargs )
        lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )
        for scheduler_class in self.scheduler_classes:
            lowerCAmelCase = self.get_scheduler_config()
            lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
            lowerCAmelCase = self.dummy_sample
            lowerCAmelCase = 0.1 * sample
            if num_inference_steps is not None and hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
                scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
            elif num_inference_steps is not None and not hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
                lowerCAmelCase = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
            lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
            lowerCAmelCase = scheduler.timesteps[5]
            lowerCAmelCase = scheduler.timesteps[6]
            lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
            lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        lowerCAmelCase = DEISMultistepScheduler(**self.get_scheduler_config() )
        lowerCAmelCase = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
        assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
        lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
        lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
        lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
        lowerCAmelCase = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
        assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , algorithm_type='''deis''' , solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , )
                        lowerCAmelCase = self.full_loop(
                            solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , )
                        assert not torch.isnan(__SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"

    def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE )
        self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=__SCREAMING_SNAKE_CASE , time_step=0 )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        lowerCAmelCase = self.full_loop()
        lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
        assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3

    def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
        lowerCAmelCase = self.full_loop(prediction_type='''v_prediction''' )
        lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
        assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3

    def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config(thresholding=__SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = 10
        lowerCAmelCase = self.dummy_model()
        lowerCAmelCase = self.dummy_sample_deter.half()
        scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
        for i, t in enumerate(scheduler.timesteps ):
            lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
        assert sample.dtype == torch.floataa
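# A compact sampling loop of the kind the ``full_loop`` helper above exercises;
# the toy UNet configuration mirrors the small dummy model used elsewhere in
# these tests, so it runs without any checkpoint download.
import torch
from diffusers import DEISMultistepScheduler, UNet2DModel

scheduler = DEISMultistepScheduler(num_train_timesteps=1000, solver_order=2)
unet = UNet2DModel(
    block_out_channels=(32, 64), layers_per_block=2, sample_size=32,
    in_channels=3, out_channels=3,
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
sample = torch.randn(1, 3, 32, 32)
scheduler.set_timesteps(10)
for t in scheduler.timesteps:
    with torch.no_grad():
        residual = unet(sample, t).sample
    sample = scheduler.step(residual, t, sample).prev_sample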
338
1
import os
import unittest

from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class lowercase_ ( UpperCamelCase_ , unittest.TestCase ):
    """simple docstring"""

    UpperCAmelCase_ : Union[str, Any] = FunnelTokenizer
    UpperCAmelCase_ : List[str] = FunnelTokenizerFast
    UpperCAmelCase_ : Optional[int] = True
    UpperCAmelCase_ : Any = True

    def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
        super().setUp()
        lowerCAmelCase = [
            '''<unk>''',
            '''<cls>''',
            '''<sep>''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

    def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->Dict:
        return FunnelTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->Optional[Any]:
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->List[Any]:
        lowerCAmelCase = '''UNwant\u00E9d,running'''
        lowerCAmelCase = '''unwanted, running'''
        return input_text, output_text

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
        lowerCAmelCase = self.tokenizer_class(self.vocab_file )
        lowerCAmelCase = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [7, 4, 5, 10, 8, 9] )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        lowerCAmelCase = self.get_tokenizers(do_lower_case=__SCREAMING_SNAKE_CASE )
        for tokenizer in tokenizers:
            lowerCAmelCase = tokenizer('''UNwant\u00E9d,running''' )
            lowerCAmelCase = len(inputs['''input_ids'''] ) - 1
            self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
            lowerCAmelCase = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
            self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
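# Funnel assigns token type id 2 to the leading [CLS], which is what the last
# test above asserts; a tiny sketch against the public
# ``funnel-transformer/small`` checkpoint (the checkpoint id is an assumption).
from transformers import FunnelTokenizer

tok = FunnelTokenizer.from_pretrained("funnel-transformer/small")
enc = tok("hello world")
assert enc["token_type_ids"][0] == 2  # [CLS] gets segment id 2, the rest 0/1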
338
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class lowercase_ ( unittest.TestCase ):
    """simple docstring"""

    @property
    def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
        torch.manual_seed(0 )
        lowerCAmelCase = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        return model

    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        lowerCAmelCase = self.dummy_uncond_unet
        lowerCAmelCase = KarrasVeScheduler()
        lowerCAmelCase = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
        pipe.to(__SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = torch.manual_seed(0 )
        lowerCAmelCase = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images
        lowerCAmelCase = torch.manual_seed(0 )
        lowerCAmelCase = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' , return_dict=__SCREAMING_SNAKE_CASE )[0]
        lowerCAmelCase = image[0, -3:, -3:, -1]
        lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        lowerCAmelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2


@slow
@require_torch
class lowercase_ ( unittest.TestCase ):
    """simple docstring"""

    def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        lowerCAmelCase = '''google/ncsnpp-celebahq-256'''
        lowerCAmelCase = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = KarrasVeScheduler()
        lowerCAmelCase = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
        pipe.to(__SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = torch.manual_seed(0 )
        lowerCAmelCase = pipe(num_inference_steps=20 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images
        lowerCAmelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        lowerCAmelCase = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
338
1
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str:
    if isinstance(snake_case__ , snake_case__ ):
        raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
    if isinstance(snake_case__ , snake_case__ ):
        raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
    if num == 0:
        return "0b0"
    lowerCAmelCase = False
    if num < 0:
        lowerCAmelCase = True
        lowerCAmelCase = -num
    lowerCAmelCase = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(snake_case__ ) for e in binary )
    return "0b" + "".join(str(snake_case__ ) for e in binary )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
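# Worked examples for the decimal-to-binary conversion above; the standard
# library's ``bin()`` computes the same strings and serves as a reference check.
for value, expected in [(0, "0b0"), (10, "0b1010"), (-5, "-0b101")]:
    assert bin(value) == expected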
338
from typing import Dict

import numpy as np

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException


if is_tf_available():
    import tensorflow as tf

    from ..tf_utils import stable_softmax

if is_torch_available():
    import torch


lowercase__ : Dict = logging.get_logger(__name__)


@add_end_docstrings(
    UpperCamelCase_ , r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """ , )
class lowercase_ ( UpperCamelCase_ ):
    """simple docstring"""

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->np.ndarray:
        if self.framework == "tf":
            lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            lowerCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE )
        else:
            raise ValueError('''Unsupported framework''' )
        return masked_index

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->np.ndarray:
        lowerCAmelCase = self.get_masked_index(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                '''fill-mask''' , self.model.base_model_prefix , F"No mask_token ({self.tokenizer.mask_token}) found on the input" , )

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->str:
        if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->Dict[str, GenericTensor]:
        if return_tensors is None:
            lowerCAmelCase = self.framework
        lowerCAmelCase = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
        self.ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE )
        return model_inputs

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Tuple:
        lowerCAmelCase = self.model(**__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = model_inputs['''input_ids''']
        return model_outputs

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=None ) ->str:
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            lowerCAmelCase = target_ids.shape[0]
        lowerCAmelCase = model_outputs['''input_ids'''][0]
        lowerCAmelCase = model_outputs['''logits''']
        if self.framework == "tf":
            lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
            lowerCAmelCase = outputs.numpy()
            lowerCAmelCase = outputs[0, masked_index, :]
            lowerCAmelCase = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
            if target_ids is not None:
                lowerCAmelCase = tf.gather_nd(tf.squeeze(__SCREAMING_SNAKE_CASE , 0 ) , target_ids.reshape(-1 , 1 ) )
                lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE , 0 )
            lowerCAmelCase = tf.math.top_k(__SCREAMING_SNAKE_CASE , k=__SCREAMING_SNAKE_CASE )
            lowerCAmelCase , lowerCAmelCase = topk.values.numpy(), topk.indices.numpy()
        else:
            lowerCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample
            lowerCAmelCase = outputs[0, masked_index, :]
            lowerCAmelCase = logits.softmax(dim=-1 )
            if target_ids is not None:
                lowerCAmelCase = probs[..., target_ids]
            lowerCAmelCase , lowerCAmelCase = probs.topk(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = []
        lowerCAmelCase = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            lowerCAmelCase = []
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                lowerCAmelCase = input_ids.numpy().copy()
                if target_ids is not None:
                    lowerCAmelCase = target_ids[p].tolist()
                lowerCAmelCase = p
                # Filter padding out:
                lowerCAmelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                lowerCAmelCase = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
                lowerCAmelCase = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
                row.append(__SCREAMING_SNAKE_CASE )
            result.append(__SCREAMING_SNAKE_CASE )
        if single_mask:
            return result[0]
        return result

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]:
        if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
            lowerCAmelCase = [targets]
        try:
            lowerCAmelCase = self.tokenizer.get_vocab()
        except Exception:
            lowerCAmelCase = {}
        lowerCAmelCase = []
        for target in targets:
            lowerCAmelCase = vocab.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            if id_ is None:
                lowerCAmelCase = self.tokenizer(
                    __SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , max_length=1 , truncation=__SCREAMING_SNAKE_CASE , )['''input_ids''']
                if len(__SCREAMING_SNAKE_CASE ) == 0:
                    logger.warning(
                        F"The specified target token `{target}` does not exist in the model vocabulary. "
                        '''We cannot replace it with anything meaningful, ignoring it''' )
                    continue
                lowerCAmelCase = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    F"The specified target token `{target}` does not exist in the model vocabulary. "
                    F"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
            target_ids.append(id_ )
        lowerCAmelCase = list(set(__SCREAMING_SNAKE_CASE ) )
        if len(__SCREAMING_SNAKE_CASE ) == 0:
            raise ValueError('''At least one target must be provided when passed.''' )
        lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
        return target_ids

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ) ->Dict:
        lowerCAmelCase = {}
        if targets is not None:
            lowerCAmelCase = self.get_target_ids(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            lowerCAmelCase = target_ids
        if top_k is not None:
            lowerCAmelCase = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                '''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
        return {}, {}, postprocess_params

    def __call__( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
        lowerCAmelCase = super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) == 1:
            return outputs[0]
        return outputs
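# A minimal driver for the pipeline above; the printed fields follow the
# standard fill-mask output schema (score/token/token_str/sequence), with
# ``distilbert-base-uncased`` assumed as a small public checkpoint.
from transformers import pipeline

unmasker = pipeline("fill-mask", model="distilbert-base-uncased")
for pred in unmasker("Paris is the [MASK] of France.", top_k=3):
    print(f"{pred['score']:.3f}  {pred['token_str']}")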
338
1
import math


def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> bool:
    lowerCAmelCase = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(snake_case__ )


def SCREAMING_SNAKE_CASE_ ( snake_case__ = 1 / 1_2_3_4_5 ) -> int:
    lowerCAmelCase = 0
    lowerCAmelCase = 0
    lowerCAmelCase = 3
    while True:
        lowerCAmelCase = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(snake_case__ ):
            lowerCAmelCase = int(snake_case__ )
            total_partitions += 1
            if check_partition_perfect(snake_case__ ):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(snake_case__ )
        integer += 1


if __name__ == "__main__":
    print(f'{solution() = }')
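# A worked instance of the perfection check above: for integer = 3 the candidate
# partition is (3**2 - 1) / 4 = 2, and the check computes sqrt(4*2 + 1) = 3,
# then log2(3/2 + 1/2) = log2(2) = 1, an integer, so this partition is perfect.
import math

assert math.log2(math.sqrt(4 * 2 + 1) / 2 + 1 / 2) == 1.0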
338
from typing import TYPE_CHECKING

from ...utils import _LazyModule


lowercase__ : int = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}


if TYPE_CHECKING:
    from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
    import sys

    lowercase__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
338
1
import json
import os
from typing import Dict, List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


lowercase__ : List[str] = logging.get_logger(__name__)

lowercase__ : int = {
    '''vocab_file''': '''vocab.json''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
    '''merges_file''': '''merges.txt''',
}

lowercase__ : Optional[int] = {
    '''vocab_file''': {
        '''facebook/s2t-wav2vec2-large-en-de''': (
            '''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'''
        ),
    },
    '''tokenizer_config_file''': {
        '''facebook/s2t-wav2vec2-large-en-de''': (
            '''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'''
        ),
    },
    '''merges_file''': {
        '''facebook/s2t-wav2vec2-large-en-de''': (
            '''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'''
        ),
    },
}

lowercase__ : Tuple = '''</w>'''
lowercase__ : Dict = '''@@ '''


def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> List[str]:
    lowerCAmelCase = set()
    lowerCAmelCase = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        lowerCAmelCase = char
    return pairs


# Speech2Text2 has no max input length
lowercase__ : List[Any] = {'''facebook/s2t-wav2vec2-large-en-de''': 1_0_2_4}


class lowercase_ ( UpperCamelCase_ ):
    """simple docstring"""

    UpperCAmelCase_ : int = VOCAB_FILES_NAMES
    UpperCAmelCase_ : Dict = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase_ : Union[str, Any] = ["""input_ids""", """attention_mask"""]

    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) ->Optional[Any]:
        super().__init__(
            unk_token=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
        lowerCAmelCase = do_lower_case
        with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as vocab_handle:
            lowerCAmelCase = json.load(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(F"No merges files provided. {self.__class__.__name__} can only be used for decoding." )
            lowerCAmelCase = None
            lowerCAmelCase = None
        else:
            with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as merges_handle:
                lowerCAmelCase = merges_handle.read().split('''\n''' )[:-1]
            lowerCAmelCase = [tuple(merge.split()[:2] ) for merge in merges]
            lowerCAmelCase = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
            lowerCAmelCase = {}

    @property
    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        return len(self.decoder )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        return dict(self.encoder , **self.added_tokens_encoder )

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Optional[Any]:
        lowerCAmelCase = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        lowerCAmelCase = get_pairs(__SCREAMING_SNAKE_CASE )
        if not pairs:
            return token
        while True:
            lowerCAmelCase = min(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : self.bpe_ranks.get(__SCREAMING_SNAKE_CASE , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            lowerCAmelCase , lowerCAmelCase = bigram
            lowerCAmelCase = []
            lowerCAmelCase = 0
            while i < len(__SCREAMING_SNAKE_CASE ):
                try:
                    lowerCAmelCase = word.index(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    lowerCAmelCase = j
                if word[i] == first and i < len(__SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            lowerCAmelCase = tuple(__SCREAMING_SNAKE_CASE )
            lowerCAmelCase = new_word
            if len(__SCREAMING_SNAKE_CASE ) == 1:
                break
            else:
                lowerCAmelCase = get_pairs(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = ''' '''.join(__SCREAMING_SNAKE_CASE )
        if word == "\n " + BPE_TOKEN_MERGES:
            lowerCAmelCase = '''\n''' + BPE_TOKEN_MERGES
        if word.endswith(__SCREAMING_SNAKE_CASE ):
            lowerCAmelCase = word.replace(__SCREAMING_SNAKE_CASE , '''''' )
        lowerCAmelCase = word.replace(''' ''' , __SCREAMING_SNAKE_CASE )
        lowerCAmelCase = word
        return word

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Dict:
        if self.bpe_ranks is None:
            raise ValueError(
                '''This tokenizer was instantiated without a `merges.txt` file, so'''
                ''' that it can only be used for decoding, not for encoding.'''
                '''Make sure to provide `merges.txt` file at instantiation to enable '''
                '''encoding.''' )
        if self.do_lower_case:
            lowerCAmelCase = text.lower()
        lowerCAmelCase = text.split()
        lowerCAmelCase = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(__SCREAMING_SNAKE_CASE ).split(''' ''' ) ) )
        return split_tokens

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->int:
        return self.encoder.get(__SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->str:
        lowerCAmelCase = self.decoder.get(__SCREAMING_SNAKE_CASE , self.unk_token )
        return result

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->str:
        lowerCAmelCase = ''' '''.join(__SCREAMING_SNAKE_CASE )
        # make sure @@ tokens are concatenated
        lowerCAmelCase = ''''''.join(string.split(__SCREAMING_SNAKE_CASE ) )
        return string

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->Tuple[str]:
        if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        lowerCAmelCase = os.path.join(
            __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        lowerCAmelCase = os.path.join(
            __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=__SCREAMING_SNAKE_CASE , ensure_ascii=__SCREAMING_SNAKE_CASE ) + '''\n''' )
        lowerCAmelCase = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __SCREAMING_SNAKE_CASE : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        ''' Please check that the tokenizer is not corrupted!''' )
                    lowerCAmelCase = token_index
                writer.write(''' '''.join(__SCREAMING_SNAKE_CASE ) + '''\n''' )
                index += 1
        return (vocab_file, merges_file)
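# The ``get_pairs`` helper above enumerates adjacent symbol pairs, which is the
# unit BPE merges operate on; a quick self-contained check of that behavior:
def get_pairs_demo(word):
    pairs, prev = set(), word[0]
    for ch in word[1:]:
        pairs.add((prev, ch))
        prev = ch
    return pairs

assert get_pairs_demo("low") == {("l", "o"), ("o", "w")}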
338
lowercase__ : Optional[int] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''


def SCREAMING_SNAKE_CASE_ ( ) -> None:
    lowerCAmelCase = input('''Enter message: ''' )
    lowerCAmelCase = input('''Enter key [alphanumeric]: ''' )
    lowerCAmelCase = input('''Encrypt/Decrypt [e/d]: ''' )
    if mode.lower().startswith('''e''' ):
        lowerCAmelCase = '''encrypt'''
        lowerCAmelCase = encrypt_message(snake_case__ , snake_case__ )
    elif mode.lower().startswith('''d''' ):
        lowerCAmelCase = '''decrypt'''
        lowerCAmelCase = decrypt_message(snake_case__ , snake_case__ )
    print(f"\n{mode.title()}ed message:" )
    print(snake_case__ )


def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> str:
    return translate_message(snake_case__ , snake_case__ , '''encrypt''' )


def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> str:
    return translate_message(snake_case__ , snake_case__ , '''decrypt''' )


def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> str:
    lowerCAmelCase = []
    lowerCAmelCase = 0
    lowerCAmelCase = key.upper()
    for symbol in message:
        lowerCAmelCase = LETTERS.find(symbol.upper() )
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index] )
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index] )
            num %= len(snake_case__ )
            if symbol.isupper():
                translated.append(LETTERS[num] )
            elif symbol.islower():
                translated.append(LETTERS[num].lower() )
            key_index += 1
            if key_index == len(snake_case__ ):
                lowerCAmelCase = 0
        else:
            translated.append(snake_case__ )
    return "".join(snake_case__ )


if __name__ == "__main__":
    main()
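# A worked example of the tabula recta arithmetic above: with key "ABC",
# "HELLO" encrypts letter by letter as H+A=H, E+B=F, L+C=N, L+A=L, O+B=P.
LETTERS_DEMO = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

def vigenere_encrypt_demo(message, key):
    out, k = [], 0
    for ch in message:
        idx = LETTERS_DEMO.find(ch)
        if idx != -1:
            idx = (idx + LETTERS_DEMO.find(key[k])) % 26
            k = (k + 1) % len(key)
        out.append(LETTERS_DEMO[idx] if idx != -1 else ch)
    return "".join(out)

assert vigenere_encrypt_demo("HELLO", "ABC") == "HFNLP"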
338
1
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class lowercase_ ( UpperCamelCase_ ):
    """simple docstring"""

    UpperCAmelCase_ : str = (DDPMScheduler,)

    def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->Optional[Any]:
        lowerCAmelCase = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0_0_0_1,
            '''beta_end''': 0.0_2,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }
        config.update(**__SCREAMING_SNAKE_CASE )
        return config

    def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
            self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
        self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5

    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = self.dummy_model()
        lowerCAmelCase = self.dummy_sample_deter
        lowerCAmelCase = torch.manual_seed(0 )
        for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
            # 1. predict noise residual
            lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            # 2. predict previous mean of sample x_t-1
            lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            lowerCAmelCase = pred_prev_sample
        lowerCAmelCase = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
        lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
        assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
        assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3

    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = self.dummy_model()
        lowerCAmelCase = self.dummy_sample_deter
        lowerCAmelCase = torch.manual_seed(0 )
        for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
            # 1. predict noise residual
            lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            # 2. predict previous mean of sample x_t-1
            lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            lowerCAmelCase = pred_prev_sample
        lowerCAmelCase = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
        lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
        assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
        assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3

    def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = scheduler.timesteps
        for i, timestep in enumerate(__SCREAMING_SNAKE_CASE ):
            if i == len(__SCREAMING_SNAKE_CASE ) - 1:
                lowerCAmelCase = -1
            else:
                lowerCAmelCase = timesteps[i + 1]
            lowerCAmelCase = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE )
            lowerCAmelCase = prev_t.item()
            self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = [100, 87, 50, 51, 0]
        with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''`custom_timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = [100, 87, 50, 1, 0]
        lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
        with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises( __SCREAMING_SNAKE_CASE , msg=F"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ): scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
338
from collections import defaultdict from math import ceil, sqrt def SCREAMING_SNAKE_CASE_ ( snake_case__ = 1_0_0_0_0_0_0 , snake_case__ = 1_0 ) -> int: lowerCAmelCase = defaultdict(snake_case__ ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: lowerCAmelCase = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: lowerCAmelCase = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(snake_case__ , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 1_0 ) if __name__ == "__main__": print(f'{solution() = }')
338
1
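The defaultdict row above counts hollow square laminae: an outer square of width w with a centred hole of width h (same parity, 1 <= h <= w - 2) uses w^2 - h^2 tiles. A small brute-force cross-check under that reading; the helper name and the tiny tile limit are illustrative.

from collections import defaultdict

def count_laminae(t_limit: int) -> dict:
    count = defaultdict(int)
    for outer in range(3, t_limit // 4 + 2):  # 4*outer - 4 tiles is the thinnest ring
        for hole in range(outer - 2, 0, -2):  # hole keeps the outer square's parity
            tiles = outer * outer - hole * hole
            if tiles > t_limit:
                break  # tiles grow as the hole shrinks
            count[tiles] += 1
    return count

laminae = count_laminae(100)
assert laminae[8] == 1    # only the 3x3 ring with a 1x1 hole uses exactly 8 tiles
assert laminae[32] == 2   # 6x6 with a 2x2 hole, and 9x9 with a 7x7 hole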
import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer lowercase__ : str = logging.get_logger(__name__) class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : Any = """AutoTokenizer""" UpperCAmelCase_ : Optional[int] = ["""tokenizer"""] UpperCAmelCase_ : str = { """semantic_prompt""": 1, """coarse_prompt""": 2, """fine_prompt""": 2, } def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]: super().__init__(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = speaker_embeddings @classmethod def SCREAMING_SNAKE_CASE_ ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , **__SCREAMING_SNAKE_CASE ) ->Tuple: if speaker_embeddings_dict_path is not None: lowerCAmelCase = get_file_from_repo( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ) , revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ) , ) if speaker_embeddings_path is None: logger.warning( F"`{os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}` does not exist\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
) lowerCAmelCase = None else: with open(__SCREAMING_SNAKE_CASE ) as speaker_embeddings_json: lowerCAmelCase = json.load(__SCREAMING_SNAKE_CASE ) else: lowerCAmelCase = None lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) return cls(tokenizer=__SCREAMING_SNAKE_CASE , speaker_embeddings=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , __SCREAMING_SNAKE_CASE="speaker_embeddings" , __SCREAMING_SNAKE_CASE = False , **__SCREAMING_SNAKE_CASE , ) ->int: if self.speaker_embeddings is not None: os.makedirs(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''v2''' ) , exist_ok=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = {} lowerCAmelCase = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict['''repo_or_path'''] , __SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=__SCREAMING_SNAKE_CASE , ) lowerCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}.npy" ) lowerCAmelCase = tmp_dict with open(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , '''w''' ) as fp: json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) super().save_pretrained(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE ) ->List[str]: lowerCAmelCase = self.speaker_embeddings[voice_preset] lowerCAmelCase = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." ) lowerCAmelCase = get_file_from_repo( self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ) , revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ) , ) if path is None: raise ValueError( F"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exist\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." ) lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE ) return voice_preset_dict def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None ) ->Tuple: for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F"Voice preset unrecognized, missing {key} as a key." ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray."
) def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="pt" , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ) ->int: if voice_preset is not None and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): if ( isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE ) else: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not voice_preset.endswith('''.npz''' ): lowerCAmelCase = voice_preset + '''.npz''' lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE ) if voice_preset is not None: self._validate_voice_preset_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) lowerCAmelCase = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.tokenizer( __SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) if voice_preset is not None: lowerCAmelCase = voice_preset return encoded_text
338
import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> Union[str, Any]: assert isinstance(snake_case__ , snake_case__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Union[str, Any]: lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''text''': '''string'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_text_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize( '''features''' , [ None, {'''text''': '''string'''}, {'''text''': '''int32'''}, {'''text''': '''float32'''}, ] , ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]: lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''text''': '''string'''} lowerCAmelCase = features.copy() if features else default_expected_features lowerCAmelCase = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase = TextDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read() _check_text_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> List[str]: lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''text''': '''string'''} lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read() _check_text_dataset(snake_case__ , snake_case__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]: if issubclass(snake_case__ , snake_case__ ): lowerCAmelCase = text_path elif issubclass(snake_case__ , snake_case__ ): lowerCAmelCase = [text_path] lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''text''': '''string'''} lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_text_dataset(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__=("train",) ) -> Optional[Any]: assert isinstance(snake_case__ , snake_case__ ) for split in splits: lowerCAmelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]: lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''text''': '''string'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase = TextDatasetReader({'''train''': 
text_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_text_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize( '''features''' , [ None, {'''text''': '''string'''}, {'''text''': '''int32'''}, {'''text''': '''float32'''}, ] , ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> List[Any]: lowerCAmelCase = tmp_path / '''cache''' # the "text" column has "string" dtype by default lowerCAmelCase = {'''text''': '''string'''} lowerCAmelCase = features.copy() if features else default_expected_features lowerCAmelCase = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase = TextDatasetReader({'''train''': text_path} , features=snake_case__ , cache_dir=snake_case__ ).read() _check_text_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Any: if split: lowerCAmelCase = {split: text_path} else: lowerCAmelCase = '''train''' lowerCAmelCase = {'''train''': text_path, '''test''': text_path} lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''text''': '''string'''} lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_text_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
338
1
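`TextDatasetReader`, exercised in the tests above, is an internal class; assuming it backs the public entry point in the usual way, the same behaviour is reachable via `load_dataset` (the file name here is illustrative).

from datasets import load_dataset

# Each line of the file becomes one row in a single "text" column.
dataset = load_dataset("text", data_files={"train": "sample.txt"}, split="train")
assert dataset.column_names == ["text"]
assert dataset.features["text"].dtype == "string"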
from __future__ import annotations from fractions import Fraction def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> bool: return ( num != den and num % 1_0 == den // 1_0 and (num // 1_0) / (den % 1_0) == num / den ) def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> list[str]: lowerCAmelCase = [] lowerCAmelCase = 1_1 lowerCAmelCase = int('''1''' + '''0''' * digit_len ) for num in range(snake_case__ , snake_case__ ): while den <= 9_9: if (num != den) and (num % 1_0 == den // 1_0) and (den % 1_0 != 0): if is_digit_cancelling(snake_case__ , snake_case__ ): solutions.append(f"{num}/{den}" ) den += 1 num += 1 lowerCAmelCase = 1_0 return solutions def SCREAMING_SNAKE_CASE_ ( snake_case__ = 2 ) -> int: lowerCAmelCase = 1.0 for fraction in fraction_list(snake_case__ ): lowerCAmelCase = Fraction(snake_case__ ) result *= frac.denominator / frac.numerator return int(snake_case__ ) if __name__ == "__main__": print(solution())
338
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str: if isinstance(snake_case__ , snake_case__ ): raise TypeError('''\'float\' object cannot be interpreted as an integer''' ) if isinstance(snake_case__ , snake_case__ ): raise TypeError('''\'str\' object cannot be interpreted as an integer''' ) if num == 0: return "0b0" lowerCAmelCase = False if num < 0: lowerCAmelCase = True lowerCAmelCase = -num lowerCAmelCase = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(snake_case__ ) for e in binary ) return "0b" + "".join(str(snake_case__ ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
338
1
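The fraction row above hunts for Project Euler 33's "digit-cancelling" fractions. A de-obfuscated sketch of its predicate, checked against the four known non-trivial solutions (the readable name is illustrative):

def is_digit_cancelling(num: int, den: int) -> bool:
    # "cancel" the shared digit: num's last digit equals den's first digit,
    # and the remaining digits form an equal fraction.
    return num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den

assert is_digit_cancelling(49, 98)  # 49/98 == 4/8
assert all(is_digit_cancelling(n, d) for n, d in [(16, 64), (19, 95), (26, 65)])
assert not is_digit_cancelling(30, 50)  # no shared digit to cancel
# The product of all four in lowest terms is 1/100 (Project Euler 33's answer).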
def SCREAMING_SNAKE_CASE_ ( snake_case__ = 1 , snake_case__ = 1_0_0_0 ) -> int: lowerCAmelCase = 1 lowerCAmelCase = 0 for divide_by_number in range(snake_case__ , digit + 1 ): lowerCAmelCase = [] lowerCAmelCase = numerator for _ in range(1 , digit + 1 ): if now_divide in has_been_divided: if longest_list_length < len(snake_case__ ): lowerCAmelCase = len(snake_case__ ) lowerCAmelCase = divide_by_number else: has_been_divided.append(snake_case__ ) lowerCAmelCase = now_divide * 1_0 % divide_by_number return the_digit # Tests if __name__ == "__main__": import doctest doctest.testmod()
338
class lowercase_ : """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Any: lowerCAmelCase = name lowerCAmelCase = value lowerCAmelCase = weight def __repr__( self ) ->str: return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})" def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: return self.value def SCREAMING_SNAKE_CASE_ ( self ) ->int: return self.name def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: return self.weight def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: return self.value / self.weight def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> int: lowerCAmelCase = [] for i in range(len(snake_case__ ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]: lowerCAmelCase = sorted(snake_case__ , key=snake_case__ , reverse=snake_case__ ) lowerCAmelCase = [] lowerCAmelCase , lowerCAmelCase = 0.0, 0.0 for i in range(len(snake_case__ ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]: pass if __name__ == "__main__": import doctest doctest.testmod()
338
1
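The recurring-cycle row above (Project Euler 26) tracks long-division remainders; the cycle length of 1/d is the distance between the first two occurrences of the same remainder. A de-obfuscated remainder-tracking sketch (names illustrative):

def cycle_length(denominator: int) -> int:
    seen = {}  # remainder -> position where it first appeared
    remainder, position = 1, 0
    while remainder != 0 and remainder not in seen:
        seen[remainder] = position
        remainder = remainder * 10 % denominator  # one long-division step
        position += 1
    return 0 if remainder == 0 else position - seen[remainder]

assert cycle_length(7) == 6   # 1/7 = 0.(142857)
assert cycle_length(3) == 1   # 1/3 = 0.(3)
assert cycle_length(8) == 0   # 1/8 terminates
assert max(range(2, 1000), key=cycle_length) == 983  # Project Euler 26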
from __future__ import annotations from typing import Any class lowercase_ : """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE ) ->None: lowerCAmelCase = num_of_nodes lowerCAmelCase = [] lowerCAmelCase = {} def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->None: self.m_edges.append([u_node, v_node, weight] ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->int: if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->None: if self.m_component[u_node] != u_node: for k in self.m_component: lowerCAmelCase = self.find_component(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->None: if component_size[u_node] <= component_size[v_node]: lowerCAmelCase = v_node component_size[v_node] += component_size[u_node] self.set_component(__SCREAMING_SNAKE_CASE ) elif component_size[u_node] >= component_size[v_node]: lowerCAmelCase = self.find_component(__SCREAMING_SNAKE_CASE ) component_size[u_node] += component_size[v_node] self.set_component(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->None: lowerCAmelCase = [] lowerCAmelCase = 0 lowerCAmelCase = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) lowerCAmelCase = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = edge lowerCAmelCase = self.m_component[u] lowerCAmelCase = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): lowerCAmelCase = [u, v, w] for edge in minimum_weight_edge: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = edge lowerCAmelCase = self.m_component[u] lowerCAmelCase = self.m_component[v] if u_component != v_component: mst_weight += w self.union(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) print(F"Added edge [{u} - {v}]\nAdded weight: {w}\n" ) num_of_components -= 1 lowerCAmelCase = [-1] * self.m_num_of_nodes print(F"The total weight of the minimal spanning tree is: {mst_weight}" ) def SCREAMING_SNAKE_CASE_ ( ) -> None: pass if __name__ == "__main__": import doctest doctest.testmod()
338
import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () lowercase__ : Dict = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). lowercase__ : Optional[int] = [0, 2_5, 5_0] lowercase__ : Union[str, Any] = [2_5, 5_0, 7_5] lowercase__ : int = fuzz.membership.trimf(X, abca) lowercase__ : Tuple = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. lowercase__ : List[str] = np.ones(7_5) lowercase__ : Any = np.zeros((7_5,)) # 1. Union = max(µA(x), µB(x)) lowercase__ : Union[str, Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) lowercase__ : int = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1 - µA(x)) lowercase__ : Union[str, Any] = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) lowercase__ : Optional[int] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] lowercase__ : Any = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) lowercase__ : str = young * middle_aged # 7. Bounded Sum = min[1, (µA(x) + µB(x))] lowercase__ : Tuple = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = max[0, (µA(x) - µB(x))] lowercase__ : Tuple = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title('''Young''') plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title('''Middle aged''') plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title('''union''') plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title('''intersection''') plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title('''complement_a''') plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title('''difference a/b''') plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title('''alg_sum''') plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title('''alg_product''') plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title('''bdd_sum''') plt.grid(True) plt.subplot(4, 3, 1_0) plt.plot(X, bdd_difference) plt.title('''bdd_difference''') plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
338
1
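The MST row above implements Borůvka's algorithm: in every round each component picks its cheapest outgoing edge, and components merge until one remains. A de-obfuscated sketch with a simple path-following find (names illustrative):

def boruvka_mst(num_nodes: int, edges: list) -> int:
    parent = list(range(num_nodes))

    def find(u: int) -> int:  # follow parent pointers to the component root
        while parent[u] != u:
            u = parent[u]
        return u

    mst_weight, components = 0, num_nodes
    while components > 1:
        cheapest = [None] * num_nodes  # cheapest outgoing edge per component root
        for u, v, w in edges:
            ru, rv = find(u), find(v)
            if ru != rv:
                for root in (ru, rv):
                    if cheapest[root] is None or cheapest[root][2] > w:
                        cheapest[root] = (u, v, w)
        for edge in cheapest:
            if edge is not None:
                u, v, w = edge
                ru, rv = find(u), find(v)
                if ru != rv:  # re-check: an earlier merge may already have joined them
                    parent[ru] = rv
                    mst_weight += w
                    components -= 1
    return mst_weight

# Triangle plus a pendant node: the MST keeps the edges of weight 1, 2 and 4.
assert boruvka_mst(4, [(0, 1, 1), (1, 2, 2), (0, 2, 3), (2, 3, 4)]) == 7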
from typing import TYPE_CHECKING from ...utils import _LazyModule lowercase__ : int = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']} if TYPE_CHECKING: from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer else: import sys lowercase__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
338
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : str = (DDPMScheduler,) def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->Optional[Any]: lowerCAmelCase = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**__SCREAMING_SNAKE_CASE ) return config def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ): self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: for clip_sample in [True, False]: self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Any: self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: for t in [0, 500, 999]: self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5 def SCREAMING_SNAKE_CASE_ ( self ) ->str: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = len(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter lowerCAmelCase = torch.manual_seed(0 ) for t in reversed(range(__SCREAMING_SNAKE_CASE ) ): # 1. predict noise residual lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # 2. 
predict previous mean of sample x_t-1 lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCAmelCase = pred_prev_sample lowerCAmelCase = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) ) lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2 assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3 def SCREAMING_SNAKE_CASE_ ( self ) ->str: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' ) lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = len(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter lowerCAmelCase = torch.manual_seed(0 ) for t in reversed(range(__SCREAMING_SNAKE_CASE ) ): # 1. predict noise residual lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # 2. predict previous mean of sample x_t-1 lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCAmelCase = pred_prev_sample lowerCAmelCase = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) ) lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2 assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3 def SCREAMING_SNAKE_CASE_ ( self ) ->Any: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler.timesteps for i, timestep in enumerate(__SCREAMING_SNAKE_CASE ): if i == len(__SCREAMING_SNAKE_CASE ) - 1: lowerCAmelCase = -1 else: lowerCAmelCase = timesteps[i + 1] lowerCAmelCase = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = prev_t.item() self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = [100, 87, 50, 51, 0] with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->str: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = [100, 87, 50, 1, 0] lowerCAmelCase = len(__SCREAMING_SNAKE_CASE ) with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Any: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = [scheduler.config.num_train_timesteps] 
with self.assertRaises( __SCREAMING_SNAKE_CASE , msg=F"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ): scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
338
1
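The DDPMScheduler tests above pin down the custom-timesteps contract; assuming a diffusers version with that support, the public usage looks like this:

from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # must be strictly descending
print(scheduler.timesteps)  # tensor([100, 87, 50, 1, 0])

try:  # mixing both arguments is rejected, as the last test asserts
    scheduler.set_timesteps(num_inference_steps=5, timesteps=[100, 87, 50, 1, 0])
except ValueError as err:
    print(err)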
from math import sqrt def SCREAMING_SNAKE_CASE_ ( snake_case__ = 1_0_0_0_0_0_0 ) -> int: lowerCAmelCase = 0 lowerCAmelCase = 0 lowerCAmelCase = 42 while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(snake_case__ , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(f'{solution() = }')
338
import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer lowercase__ : str = logging.get_logger(__name__) class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : Any = """AutoTokenizer""" UpperCAmelCase_ : Optional[int] = ["""tokenizer"""] UpperCAmelCase_ : str = { """semantic_prompt""": 1, """coarse_prompt""": 2, """fine_prompt""": 2, } def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]: super().__init__(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = speaker_embeddings @classmethod def SCREAMING_SNAKE_CASE_ ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , **__SCREAMING_SNAKE_CASE ) ->Tuple: if speaker_embeddings_dict_path is not None: lowerCAmelCase = get_file_from_repo( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ) , revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ) , ) if speaker_embeddings_path is None: logger.warning( F"`{os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}` does not exist\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
) lowerCAmelCase = None else: with open(__SCREAMING_SNAKE_CASE ) as speaker_embeddings_json: lowerCAmelCase = json.load(__SCREAMING_SNAKE_CASE ) else: lowerCAmelCase = None lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) return cls(tokenizer=__SCREAMING_SNAKE_CASE , speaker_embeddings=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , __SCREAMING_SNAKE_CASE="speaker_embeddings" , __SCREAMING_SNAKE_CASE = False , **__SCREAMING_SNAKE_CASE , ) ->int: if self.speaker_embeddings is not None: os.makedirs(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''v2''' ) , exist_ok=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = {} lowerCAmelCase = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict['''repo_or_path'''] , __SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=__SCREAMING_SNAKE_CASE , ) lowerCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}.npy" ) lowerCAmelCase = tmp_dict with open(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , '''w''' ) as fp: json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) super().save_pretrained(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE ) ->List[str]: lowerCAmelCase = self.speaker_embeddings[voice_preset] lowerCAmelCase = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." ) lowerCAmelCase = get_file_from_repo( self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ) , revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ) , ) if path is None: raise ValueError( F"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exist\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." ) lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE ) return voice_preset_dict def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None ) ->Tuple: for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F"Voice preset unrecognized, missing {key} as a key." ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray."
) def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="pt" , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ) ->int: if voice_preset is not None and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): if ( isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE ) else: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not voice_preset.endswith('''.npz''' ): lowerCAmelCase = voice_preset + '''.npz''' lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE ) if voice_preset is not None: self._validate_voice_preset_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) lowerCAmelCase = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.tokenizer( __SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) if voice_preset is not None: lowerCAmelCase = voice_preset return encoded_text
338
1
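The cuboid row above (Project Euler 86) relies on unrolling the box: with M the longest side, folding the two shorter sides into one flat leg gives the shortest surface route sqrt(M^2 + (N+P)^2). A hedged mini-check (helper name illustrative):

from math import sqrt

def shortest_route(m: int, n: int, p: int) -> float:
    # m is the longest side; folding the two shorter sides into one flat leg
    # of length n + p gives the shortest straight line over the surface.
    return sqrt(m * m + (n + p) ** 2)

assert shortest_route(6, 5, 3) == 10.0  # the classic integer-path cuboid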
import random def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ = False ) -> dict: lowerCAmelCase = {i: [] for i in range(snake_case__ )} # if probability is greater than or equal to 1, then generate a complete graph if probability >= 1: return complete_graph(snake_case__ ) # if probability is less than or equal to 0, then return a graph without edges if probability <= 0: return graph # for each pair of nodes, add an edge from u to v # if the randomly generated number is smaller than probability for i in range(snake_case__ ): for j in range(i + 1 , snake_case__ ): if random.random() < probability: graph[i].append(snake_case__ ) if not directed: # if the graph is undirected, add an edge from j to i as well graph[j].append(snake_case__ ) return graph def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> dict: return { i: [j for j in range(snake_case__ ) if i != j] for i in range(snake_case__ ) } if __name__ == "__main__": import doctest doctest.testmod()
338
import warnings from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401 warnings.warn( '''The `inpainting.py` script is outdated. Please use directly `from diffusers import''' ''' StableDiffusionInpaintPipeline` instead.''' )
338
1
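A de-obfuscated sketch of the Erdős–Rényi-style generator in the first row above (names illustrative); each candidate edge is kept when the random draw falls below `probability`:

import random

def random_graph(num_vertices: int, probability: float, directed: bool = False) -> dict:
    graph = {i: [] for i in range(num_vertices)}
    for i in range(num_vertices):
        for j in range(i + 1, num_vertices):
            if random.random() < probability:  # keep this edge with the given probability
                graph[i].append(j)
                if not directed:
                    graph[j].append(i)
    return graph

random.seed(0)
g = random_graph(4, 0.5)
# Undirected invariant: every edge appears in both adjacency lists.
assert all(i in g[j] for i in g for j in g[i])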
from __future__ import annotations def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> list: if len(snake_case__ ) == 0: return [] lowerCAmelCase , lowerCAmelCase = min(snake_case__ ), max(snake_case__ ) lowerCAmelCase = int(max_value - min_value ) + 1 lowerCAmelCase = [[] for _ in range(snake_case__ )] for i in my_list: buckets[int(i - min_value )].append(snake_case__ ) return [v for bucket in buckets for v in sorted(snake_case__ )] if __name__ == "__main__": from doctest import testmod testmod() assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
338
import os import re import shutil import sys import tempfile import unittest import black lowercase__ : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated. lowercase__ : Dict = ''' def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states ''' class lowercase_ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: lowerCAmelCase = tempfile.mkdtemp() os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) ) lowerCAmelCase = self.transformer_dir shutil.copy( os.path.join(__SCREAMING_SNAKE_CASE , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: lowerCAmelCase = '''src/transformers''' shutil.rmtree(self.transformer_dir ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Union[str, Any]: lowerCAmelCase = comment + F"\nclass {class_name}(nn.Module):\n" + class_code if overwrite_result is not None: lowerCAmelCase = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result lowerCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) lowerCAmelCase = black.format_str(__SCREAMING_SNAKE_CASE , mode=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = os.path.join(self.transformer_dir , '''new_code.py''' ) with open(__SCREAMING_SNAKE_CASE , '''w''' , newline='''\n''' ) as f: f.write(__SCREAMING_SNAKE_CASE ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(__SCREAMING_SNAKE_CASE ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=__SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f: self.assertTrue(f.read() , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->int: lowerCAmelCase = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: # Base copy consistency self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , __SCREAMING_SNAKE_CASE , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , 
'''TestModel''' , __SCREAMING_SNAKE_CASE ) , ) # Copy consistency with a really long name lowerCAmelCase = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( F"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , F"{long_class_name}LMPredictionHead" , re.sub('''Bert''' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , __SCREAMING_SNAKE_CASE , overwrite_result=re.sub('''Bert''' , '''TestModel''' , __SCREAMING_SNAKE_CASE ) , ) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: lowerCAmelCase = check_copies.LOCALIZED_READMES['''README_zh-hans.md'''] lowerCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the''' ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for''' ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong''' ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.''' ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),''' ''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and''' ''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same''' ''' method has been applied to compress GPT2 into''' ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into''' ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),''' ''' Multilingual BERT into''' ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German''' ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**''' ''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders''' ''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang''' ''' Luong, Quoc V. Le, Christopher D. Manning.''' ) lowerCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) lowerCAmelCase = ( '''1. 
**[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.''' ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文''' ''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and''' ''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same''' ''' method has been applied to compress GPT2 into''' ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into''' ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),''' ''' Multilingual BERT into''' ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German''' ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自''' ''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather''' ''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,''' ''' Christopher D. Manning 发布。\n''' ) lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] ) self.assertFalse(__SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] ) # Check whether the number of models is equal to README.md after conversion. self.assertTrue(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the''' ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for''' ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong''' ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.''' ) lowerCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and''' ''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) lowerCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] ) # Check if the model link is synchronized. self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
338
1
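The bucket-sort row above maps each value to bucket `int(value - min_value)` and sorts within buckets. A de-obfuscated sketch (names illustrative) showing that tightly clustered floats all land in one bucket, where the per-bucket sort does the work:

def bucket_sort(values: list) -> list:
    if not values:
        return []
    lo, hi = min(values), max(values)
    buckets = [[] for _ in range(int(hi - lo) + 1)]
    for v in values:
        buckets[int(v - lo)].append(v)  # bucket index from the value's offset
    return [v for bucket in buckets for v in sorted(bucket)]

assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
# All of these floats land in bucket 0; the per-bucket sort orders them.
assert bucket_sort([0.9, 0.1, 0.5, 0.4, 0.8]) == [0.1, 0.4, 0.5, 0.8, 0.9]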
import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures lowercase__ : Tuple = logging.get_logger(__name__) @dataclass class lowercase_ : """simple docstring""" UpperCAmelCase_ : str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} ) UpperCAmelCase_ : str = field( metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} ) UpperCAmelCase_ : int = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) UpperCAmelCase_ : bool = field( default=UpperCamelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: lowerCAmelCase = self.task_name.lower() class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : int = """train""" UpperCAmelCase_ : Any = """dev""" UpperCAmelCase_ : Tuple = """test""" class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : GlueDataTrainingArguments UpperCAmelCase_ : str UpperCAmelCase_ : List[InputFeatures] def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = Split.train , __SCREAMING_SNAKE_CASE = None , ) ->Dict: warnings.warn( '''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets ''' '''library. You can have a look at this example script for pointers: ''' '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , __SCREAMING_SNAKE_CASE , ) lowerCAmelCase = args lowerCAmelCase = glue_processors[args.task_name]() lowerCAmelCase = glue_output_modes[args.task_name] if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): try: lowerCAmelCase = Split[mode] except KeyError: raise KeyError('''mode is not a valid split name''' ) # Load data features from cache or dataset file lowerCAmelCase = os.path.join( cache_dir if cache_dir is not None else args.data_dir , F"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}" , ) lowerCAmelCase = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) lowerCAmelCase , lowerCAmelCase = label_list[2], label_list[1] lowerCAmelCase = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
lowerCAmelCase = cached_features_file + '''.lock''' with FileLock(__SCREAMING_SNAKE_CASE ): if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache: lowerCAmelCase = time.time() lowerCAmelCase = torch.load(__SCREAMING_SNAKE_CASE ) logger.info( F"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start ) else: logger.info(F"Creating features from dataset file at {args.data_dir}" ) if mode == Split.dev: lowerCAmelCase = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: lowerCAmelCase = self.processor.get_test_examples(args.data_dir ) else: lowerCAmelCase = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: lowerCAmelCase = examples[:limit_length] lowerCAmelCase = glue_convert_examples_to_features( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , max_length=args.max_seq_length , label_list=__SCREAMING_SNAKE_CASE , output_mode=self.output_mode , ) lowerCAmelCase = time.time() torch.save(self.features , __SCREAMING_SNAKE_CASE ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" ) def __len__( self ) ->Optional[int]: return len(self.features ) def __getitem__( self , __SCREAMING_SNAKE_CASE ) ->InputFeatures: return self.features[i] def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: return self.label_list
338
import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( '''split_dict''' , [ SplitDict(), SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name='''my_dataset''' )} ), SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 )} ), SplitDict({'''train''': SplitInfo()} ), ] , ) def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]: lowerCAmelCase = split_dict._to_yaml_list() assert len(snake_case__ ) == len(snake_case__ ) lowerCAmelCase = SplitDict._from_yaml_list(snake_case__ ) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump lowerCAmelCase = None # the split name of split_dict takes over the name of the split info object lowerCAmelCase = split_name assert split_dict == reloaded @pytest.mark.parametrize( '''split_info''' , [SplitInfo(), SplitInfo(dataset_name=snake_case__ ), SplitInfo(dataset_name='''my_dataset''' )] ) def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[int]: # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name" # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files lowerCAmelCase = asdict(SplitDict({'''train''': split_info} ) ) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
338
1
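For reference, a minimal sketch of the round-trip invariant the parametrized test above exercises: serializing a SplitDict to its YAML list form and reloading it preserves the split contents, while the deprecated dataset_name field is dropped. This assumes a version of `datasets` that still exposes the private helpers used in the test.

from datasets.splits import SplitDict, SplitInfo

splits = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
yaml_list = splits._to_yaml_list()              # list of plain dicts, no dataset_name
reloaded = SplitDict._from_yaml_list(yaml_list)
assert reloaded["train"].num_examples == 42     # split metadata survives the round trip
assert reloaded["train"].name == "train"        # the dict key takes over as the split name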
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowercase__ : int = { '''configuration_conditional_detr''': [ '''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConditionalDetrConfig''', '''ConditionalDetrOnnxConfig''', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Any = ['''ConditionalDetrFeatureExtractor'''] lowercase__ : Dict = ['''ConditionalDetrImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Optional[int] = [ '''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ConditionalDetrForObjectDetection''', '''ConditionalDetrForSegmentation''', '''ConditionalDetrModel''', '''ConditionalDetrPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys lowercase__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
338
import unittest import numpy as np def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , ) -> np.ndarray: lowerCAmelCase = np.shape(snake_case__ ) lowerCAmelCase = np.shape(snake_case__ ) lowerCAmelCase = np.shape(snake_case__ ) if shape_a[0] != shape_b[0]: lowerCAmelCase = ( '''Expected the same number of rows for A and B. ''' f"Instead found A of size {shape_a} and B of size {shape_b}" ) raise ValueError(snake_case__ ) if shape_b[1] != shape_c[1]: lowerCAmelCase = ( '''Expected the same number of columns for B and C. ''' f"Instead found B of size {shape_b} and C of size {shape_c}" ) raise ValueError(snake_case__ ) lowerCAmelCase = pseudo_inv if a_inv is None: try: lowerCAmelCase = np.linalg.inv(snake_case__ ) except np.linalg.LinAlgError: raise ValueError( '''Input matrix A is not invertible. Cannot compute Schur complement.''' ) return mat_c - mat_b.T @ a_inv @ mat_b class lowercase_ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self ) ->None: lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] ) lowerCAmelCase = np.array([[2, 1], [6, 3]] ) lowerCAmelCase = schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = np.block([[a, b], [b.T, c]] ) lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE ) self.assertAlmostEqual(__SCREAMING_SNAKE_CASE , det_a * det_s ) def SCREAMING_SNAKE_CASE_ ( self ) ->None: lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] ) lowerCAmelCase = np.array([[2, 1], [6, 3]] ) with self.assertRaises(__SCREAMING_SNAKE_CASE ): schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->None: lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] ) lowerCAmelCase = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(__SCREAMING_SNAKE_CASE ): schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
338
1
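A short numerical check (pure numpy, same matrices as the unit tests above) of the identity those tests rely on: for a block matrix M = [[A, B], [B^T, C]] with invertible A, det(M) = det(A) * det(S), where S = C - B^T A^{-1} B is the Schur complement of A in M.

import numpy as np

a = np.array([[1.0, 2, 1], [2, 1, 2], [3, 2, 4]])
b = np.array([[0.0, 3], [3, 0], [2, 3]])
c = np.array([[2.0, 1], [6, 3]])

s = c - b.T @ np.linalg.inv(a) @ b          # Schur complement of A in M
m = np.block([[a, b], [b.T, c]])            # assemble the full block matrix
assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))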
import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Union[str, Any]: # Load configuration defined in the metadata file with open(snake_case__ ) as metadata_file: lowerCAmelCase = json.load(snake_case__ ) lowerCAmelCase = LukeConfig(use_entity_aware_attention=snake_case__ , **metadata['''model_config'''] ) # Load in the weights from the checkpoint_path lowerCAmelCase = torch.load(snake_case__ , map_location='''cpu''' )['''module'''] # Load the entity vocab file lowerCAmelCase = load_original_entity_vocab(snake_case__ ) # add an entry for [MASK2] lowerCAmelCase = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 lowerCAmelCase = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] ) # Add special tokens to the token vocabulary for downstream tasks lowerCAmelCase = AddedToken('''<ent>''' , lstrip=snake_case__ , rstrip=snake_case__ ) lowerCAmelCase = AddedToken('''<ent2>''' , lstrip=snake_case__ , rstrip=snake_case__ ) tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f"Saving tokenizer to {pytorch_dump_folder_path}" ) tokenizer.save_pretrained(snake_case__ ) with open(os.path.join(snake_case__ , '''tokenizer_config.json''' ) , '''r''' ) as f: lowerCAmelCase = json.load(snake_case__ ) lowerCAmelCase = '''MLukeTokenizer''' with open(os.path.join(snake_case__ , '''tokenizer_config.json''' ) , '''w''' ) as f: json.dump(snake_case__ , snake_case__ ) with open(os.path.join(snake_case__ , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f: json.dump(snake_case__ , snake_case__ ) lowerCAmelCase = MLukeTokenizer.from_pretrained(snake_case__ ) # Initialize the embeddings of the special tokens lowerCAmelCase = tokenizer.convert_tokens_to_ids(['''@'''] )[0] lowerCAmelCase = tokenizer.convert_tokens_to_ids(['''#'''] )[0] lowerCAmelCase = state_dict['''embeddings.word_embeddings.weight'''] lowerCAmelCase = word_emb[ent_init_index].unsqueeze(0 ) lowerCAmelCase = word_emb[enta_init_index].unsqueeze(0 ) lowerCAmelCase = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: lowerCAmelCase = state_dict[bias_name] lowerCAmelCase = decoder_bias[ent_init_index].unsqueeze(0 ) lowerCAmelCase = decoder_bias[enta_init_index].unsqueeze(0 ) lowerCAmelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: lowerCAmelCase = f"encoder.layer.{layer_index}.attention.self." 
lowerCAmelCase = state_dict[prefix + matrix_name] lowerCAmelCase = state_dict[prefix + matrix_name] lowerCAmelCase = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks lowerCAmelCase = state_dict['''entity_embeddings.entity_embeddings.weight'''] lowerCAmelCase = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 ) lowerCAmelCase = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' lowerCAmelCase = state_dict['''entity_predictions.bias'''] lowerCAmelCase = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 ) lowerCAmelCase = torch.cat([entity_prediction_bias, entity_mask_bias] ) lowerCAmelCase = LukeForMaskedLM(config=snake_case__ ).eval() state_dict.pop('''entity_predictions.decoder.weight''' ) state_dict.pop('''lm_head.decoder.weight''' ) state_dict.pop('''lm_head.decoder.bias''' ) lowerCAmelCase = OrderedDict() for key, value in state_dict.items(): if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )): lowerCAmelCase = state_dict[key] else: lowerCAmelCase = state_dict[key] lowerCAmelCase , lowerCAmelCase = model.load_state_dict(snake_case__ , strict=snake_case__ ) if set(snake_case__ ) != {"luke.embeddings.position_ids"}: raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" ) if set(snake_case__ ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f"Unexpected missing_keys: {missing_keys}" ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs lowerCAmelCase = MLukeTokenizer.from_pretrained(snake_case__ , task='''entity_classification''' ) lowerCAmelCase = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).''' lowerCAmelCase = (0, 9) lowerCAmelCase = tokenizer(snake_case__ , entity_spans=[span] , return_tensors='''pt''' ) lowerCAmelCase = model(**snake_case__ ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base lowerCAmelCase = torch.Size((1, 3_3, 7_6_8) ) lowerCAmelCase = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case__ , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base lowerCAmelCase = torch.Size((1, 1, 7_6_8) ) lowerCAmelCase = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is" f" {expected_shape}" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , snake_case__ , atol=1E-4 ): raise ValueError # Verify masked word/entity prediction lowerCAmelCase = MLukeTokenizer.from_pretrained(snake_case__ ) lowerCAmelCase = '''Tokyo is the capital of <mask>.''' lowerCAmelCase = (2_4, 3_0) lowerCAmelCase = tokenizer(snake_case__ , entity_spans=[span] , return_tensors='''pt''' ) lowerCAmelCase = model(**snake_case__ ) 
lowerCAmelCase = encoding['''input_ids'''][0].tolist() lowerCAmelCase = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) ) lowerCAmelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(snake_case__ ) lowerCAmelCase = outputs.entity_logits[0][0].argmax().item() lowerCAmelCase = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print('''Saving PyTorch model to {}'''.format(snake_case__ ) ) model.save_pretrained(snake_case__ ) def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> List[str]: lowerCAmelCase = ['''[MASK]''', '''[PAD]''', '''[UNK]'''] lowerCAmelCase = [json.loads(snake_case__ ) for line in open(snake_case__ )] lowerCAmelCase = {} for entry in data: lowerCAmelCase = entry['''id'''] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: lowerCAmelCase = entity_id break lowerCAmelCase = f"{language}:{entity_name}" lowerCAmelCase = entity_id return new_mapping if __name__ == "__main__": lowercase__ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''') parser.add_argument( '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.''' ) parser.add_argument( '''--entity_vocab_path''', default=None, type=str, help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.''' ) parser.add_argument( '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.''' ) lowercase__ : int = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
338
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration lowercase__ : Any = { '''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''', '''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''', '''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''', '''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''', '''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''', '''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''', '''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''', '''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''', '''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''', '''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''', } def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str: lowerCAmelCase = ['''layers''', '''blocks'''] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) lowercase__ : List[Any] = { '''blocks''': '''layers''', '''mlp.0''': '''fc1''', '''mlp.2''': '''fc2''', '''mlp_ln''': '''final_layer_norm''', '''.attn.query''': '''.self_attn.q_proj''', '''.attn.key''': '''.self_attn.k_proj''', '''.attn.value''': '''.self_attn.v_proj''', '''.attn_ln''': '''.self_attn_layer_norm''', '''.attn.out''': '''.self_attn.out_proj''', '''.cross_attn.query''': '''.encoder_attn.q_proj''', '''.cross_attn.key''': '''.encoder_attn.k_proj''', '''.cross_attn.value''': '''.encoder_attn.v_proj''', '''.cross_attn_ln''': '''.encoder_attn_layer_norm''', '''.cross_attn.out''': '''.encoder_attn.out_proj''', '''decoder.ln.''': '''decoder.layer_norm.''', '''encoder.ln.''': '''encoder.layer_norm.''', '''token_embedding''': '''embed_tokens''', '''encoder.positional_embedding''': '''encoder.embed_positions.weight''', '''decoder.positional_embedding''': '''decoder.embed_positions.weight''', '''ln_post''': '''layer_norm''', } def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]: lowerCAmelCase = list(s_dict.keys() ) for key in keys: lowerCAmelCase = key for k, v in WHISPER_MAPPING.items(): if k in key: lowerCAmelCase = new_key.replace(snake_case__ , snake_case__ ) print(f"{key} -> {new_key}" ) lowerCAmelCase = s_dict.pop(snake_case__ ) return s_dict def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]: lowerCAmelCase , lowerCAmelCase = emb.weight.shape lowerCAmelCase = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) lowerCAmelCase = emb.weight.data return lin_layer def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> bytes: os.makedirs(snake_case__ , exist_ok=snake_case__ ) 
lowerCAmelCase = os.path.basename(snake_case__ ) lowerCAmelCase = url.split('''/''' )[-2] lowerCAmelCase = os.path.join(snake_case__ , snake_case__ ) if os.path.exists(snake_case__ ) and not os.path.isfile(snake_case__ ): raise RuntimeError(f"{download_target} exists and is not a regular file" ) if os.path.isfile(snake_case__ ): lowerCAmelCase = open(snake_case__ , '''rb''' ).read() if hashlib.sha256(snake_case__ ).hexdigest() == expected_sha256: return model_bytes else: warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" ) with urllib.request.urlopen(snake_case__ ) as source, open(snake_case__ , '''wb''' ) as output: with tqdm( total=int(source.info().get('''Content-Length''' ) ) , ncols=8_0 , unit='''iB''' , unit_scale=snake_case__ , unit_divisor=1_0_2_4 ) as loop: while True: lowerCAmelCase = source.read(8_1_9_2 ) if not buffer: break output.write(snake_case__ ) loop.update(len(snake_case__ ) ) lowerCAmelCase = open(snake_case__ , '''rb''' ).read() if hashlib.sha256(snake_case__ ).hexdigest() != expected_sha256: raise RuntimeError( '''Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.''' ) return model_bytes def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> str: if ".pt" not in checkpoint_path: lowerCAmelCase = _download(_MODELS[checkpoint_path] ) else: lowerCAmelCase = torch.load(snake_case__ , map_location='''cpu''' ) lowerCAmelCase = original_checkpoint['''dims'''] lowerCAmelCase = original_checkpoint['''model_state_dict'''] lowerCAmelCase = state_dict['''decoder.token_embedding.weight'''] remove_ignore_keys_(snake_case__ ) rename_keys(snake_case__ ) lowerCAmelCase = True lowerCAmelCase = state_dict['''decoder.layers.0.fc1.weight'''].shape[0] lowerCAmelCase = WhisperConfig( vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=snake_case__ , decoder_ffn_dim=snake_case__ , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_head'''] , max_source_positions=dimensions['''n_audio_ctx'''] , ) lowerCAmelCase = WhisperForConditionalGeneration(snake_case__ ) lowerCAmelCase , lowerCAmelCase = model.model.load_state_dict(snake_case__ , strict=snake_case__ ) if len(snake_case__ ) > 0 and not set(snake_case__ ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,''' f" but all the following weights are missing {missing}" ) if tie_embeds: lowerCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens ) else: lowerCAmelCase = proj_out_weights model.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase__ : List[str] = argparse.ArgumentParser() # # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') lowercase__ : int = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
338
1
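A hedged, self-contained sketch of the checksum-verified download pattern the Whisper conversion script above uses: the expected SHA-256 digest is embedded as the second-to-last path segment of each checkpoint URL, a cached file is reused only if its digest matches, and a mismatch after download raises. The function name `cached_download` and the `root` parameter are illustrative, not taken from the script.

import hashlib
import os
import urllib.request

def cached_download(url: str, root: str) -> bytes:
    os.makedirs(root, exist_ok=True)
    expected_sha256 = url.split("/")[-2]        # digest lives in the URL path
    target = os.path.join(root, os.path.basename(url))
    if os.path.isfile(target):
        data = open(target, "rb").read()
        if hashlib.sha256(data).hexdigest() == expected_sha256:
            return data                          # cache hit with a valid checksum
    with urllib.request.urlopen(url) as src, open(target, "wb") as out:
        out.write(src.read())                    # (the real script streams with tqdm)
    data = open(target, "rb").read()
    if hashlib.sha256(data).hexdigest() != expected_sha256:
        raise RuntimeError("SHA256 checksum mismatch; please retry the download.")
    return data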
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType lowercase__ : int = logging.get_logger(__name__) lowercase__ : Tuple = { '''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''', } # fmt: off lowercase__ : Union[str, Any] = [ 1, 2, 7, 8, 9, 1_0, 1_4, 2_5, 2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2, 6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5, 7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7, 1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1, 4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6, 1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1, 1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9, 3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1 ] lowercase__ : Optional[Any] = [ 1, 2, 7, 8, 9, 1_0, 1_4, 2_5, 2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2, 6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3, 8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7, 3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7, 7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3, 1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5, 2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5, 4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2 ] class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : int = """whisper""" UpperCAmelCase_ : Dict = ["""past_key_values"""] UpperCAmelCase_ : Any = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self , __SCREAMING_SNAKE_CASE=51865 , __SCREAMING_SNAKE_CASE=80 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=1536 , __SCREAMING_SNAKE_CASE=1536 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=50257 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0_2 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=1500 , __SCREAMING_SNAKE_CASE=448 , __SCREAMING_SNAKE_CASE=50256 , __SCREAMING_SNAKE_CASE=50256 , __SCREAMING_SNAKE_CASE=50256 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=[220, 50256] , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.0_5 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=7 , **__SCREAMING_SNAKE_CASE , ) 
->Optional[Any]: lowerCAmelCase = vocab_size lowerCAmelCase = num_mel_bins lowerCAmelCase = d_model lowerCAmelCase = encoder_layers lowerCAmelCase = encoder_attention_heads lowerCAmelCase = decoder_layers lowerCAmelCase = decoder_attention_heads lowerCAmelCase = decoder_ffn_dim lowerCAmelCase = encoder_ffn_dim lowerCAmelCase = dropout lowerCAmelCase = attention_dropout lowerCAmelCase = activation_dropout lowerCAmelCase = activation_function lowerCAmelCase = init_std lowerCAmelCase = encoder_layerdrop lowerCAmelCase = decoder_layerdrop lowerCAmelCase = use_cache lowerCAmelCase = encoder_layers lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True lowerCAmelCase = max_source_positions lowerCAmelCase = max_target_positions # Audio Classification-specific parameters. Feel free to ignore for other classes. lowerCAmelCase = classifier_proj_size lowerCAmelCase = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCAmelCase = apply_spec_augment lowerCAmelCase = mask_time_prob lowerCAmelCase = mask_time_length lowerCAmelCase = mask_time_min_masks lowerCAmelCase = mask_feature_prob lowerCAmelCase = mask_feature_length lowerCAmelCase = mask_feature_min_masks lowerCAmelCase = median_filter_width super().__init__( pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , decoder_start_token_id=__SCREAMING_SNAKE_CASE , suppress_tokens=__SCREAMING_SNAKE_CASE , begin_suppress_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) class lowercase_ ( UpperCamelCase_ ): """simple docstring""" @property def SCREAMING_SNAKE_CASE_ ( self ) ->Mapping[str, Mapping[int, str]]: lowerCAmelCase = OrderedDict( [ ('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}), ] ) if self.use_past: lowerCAmelCase = {0: '''batch'''} else: lowerCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='''inputs''' ) return common_inputs def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 22050 , __SCREAMING_SNAKE_CASE = 5.0 , __SCREAMING_SNAKE_CASE = 220 , ) ->Mapping[str, Any]: lowerCAmelCase = OrderedDict() lowerCAmelCase = OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , time_duration=__SCREAMING_SNAKE_CASE , frequency=__SCREAMING_SNAKE_CASE , ) lowerCAmelCase = encoder_inputs['''input_features'''].shape[2] lowerCAmelCase = encoder_sequence_length // 2 if self.use_past else seq_length lowerCAmelCase = super().generate_dummy_inputs( preprocessor.tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = encoder_inputs.pop('''input_features''' ) lowerCAmelCase = decoder_inputs.pop('''decoder_input_ids''' ) if "past_key_values" in decoder_inputs: lowerCAmelCase = decoder_inputs.pop('''past_key_values''' ) return dummy_inputs @property def SCREAMING_SNAKE_CASE_ ( self ) ->float: return 1e-3
338
from ...processing_utils import ProcessorMixin class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = ["""image_processor""", """feature_extractor"""] UpperCAmelCase_ : Optional[int] = """TvltImageProcessor""" UpperCAmelCase_ : Optional[int] = """TvltFeatureExtractor""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Optional[int]: super().__init__(image_processor=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = image_processor lowerCAmelCase = feature_extractor def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) ->List[Any]: if images is None and audio is None: raise ValueError('''You need to specify either an `images` or `audio` input to process.''' ) lowerCAmelCase = None if images is not None: lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , mask_pixel=__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if images_mixed is not None: lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , is_mixed=__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if audio is not None: lowerCAmelCase = self.feature_extractor( __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , mask_audio=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) lowerCAmelCase = {} if audio is not None: output_dict.update(__SCREAMING_SNAKE_CASE ) if images is not None: output_dict.update(__SCREAMING_SNAKE_CASE ) if images_mixed_dict is not None: output_dict.update(__SCREAMING_SNAKE_CASE ) return output_dict @property def SCREAMING_SNAKE_CASE_ ( self ) ->Any: lowerCAmelCase = self.image_processor.model_input_names lowerCAmelCase = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
338
1
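A small usage sketch for the Whisper configuration class defined above: defaults follow the signature shown (80 mel bins, 1500 encoder positions, 448 decoder positions), and any field can be overridden by keyword. The override values here are illustrative only.

from transformers import WhisperConfig

config = WhisperConfig(d_model=384, encoder_layers=4, decoder_layers=4,
                       encoder_attention_heads=6, decoder_attention_heads=6)
print(config.num_mel_bins)          # 80, the default spectrogram height
print(config.max_source_positions)  # 1500 encoder positions by default
print(config.max_target_positions)  # 448 decoder positions by default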
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowercase_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = IFPipeline UpperCAmelCase_ : int = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""} UpperCAmelCase_ : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS UpperCAmelCase_ : int = PipelineTesterMixin.required_optional_params - {"""latents"""} def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: return self._get_dummy_components() def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) ->List[str]: if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ): lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def SCREAMING_SNAKE_CASE_ ( self ) ->str: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def SCREAMING_SNAKE_CASE_ ( self ) ->int: self._test_save_load_local() def SCREAMING_SNAKE_CASE_ ( self ) ->str: self._test_inference_batch_single_identical( expected_max_diff=1e-2 , ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def SCREAMING_SNAKE_CASE_ ( self ) ->str: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class lowercase_ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: # if lowerCAmelCase = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa ) lowerCAmelCase = IFSuperResolutionPipeline.from_pretrained( '''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to('''cuda''' ) lowerCAmelCase , lowerCAmelCase = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() lowerCAmelCase = None lowerCAmelCase = None 
pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img lowerCAmelCase = IFImgaImgPipeline(**pipe_a.components ) lowerCAmelCase = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting lowerCAmelCase = IFInpaintingPipeline(**pipe_a.components ) lowerCAmelCase = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->int: # pipeline 1 _start_torch_memory_measurement() lowerCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCAmelCase = pipe_a( prompt_embeds=__SCREAMING_SNAKE_CASE , negative_prompt_embeds=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''np''' , ) lowerCAmelCase = output.images[0] assert image.shape == (64, 64, 3) lowerCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 lowerCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' ) assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # pipeline 2 _start_torch_memory_measurement() lowerCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = pipe_a( prompt_embeds=__SCREAMING_SNAKE_CASE , negative_prompt_embeds=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''np''' , ) lowerCAmelCase = output.images[0] assert image.shape == (256, 256, 3) lowerCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 lowerCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Optional[Any]: # pipeline 1 _start_torch_memory_measurement() lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCAmelCase = pipe_a( prompt_embeds=__SCREAMING_SNAKE_CASE , negative_prompt_embeds=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''np''' , ) lowerCAmelCase = 
output.images[0] assert image.shape == (64, 64, 3) lowerCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 lowerCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' ) assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # pipeline 2 _start_torch_memory_measurement() lowerCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = pipe_a( prompt_embeds=__SCREAMING_SNAKE_CASE , negative_prompt_embeds=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , original_image=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''np''' , ) lowerCAmelCase = output.images[0] assert image.shape == (256, 256, 3) lowerCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 lowerCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Optional[int]: # pipeline 1 _start_torch_memory_measurement() lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCAmelCase = pipe_a( prompt_embeds=__SCREAMING_SNAKE_CASE , negative_prompt_embeds=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , mask_image=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''np''' , ) lowerCAmelCase = output.images[0] assert image.shape == (64, 64, 3) lowerCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 lowerCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' ) assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # pipeline 2 _start_torch_memory_measurement() lowerCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = pipe_a( prompt_embeds=__SCREAMING_SNAKE_CASE , negative_prompt_embeds=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , mask_image=__SCREAMING_SNAKE_CASE , original_image=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''np''' , ) lowerCAmelCase = output.images[0] assert image.shape == (256, 256, 3) lowerCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 lowerCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE 
) def SCREAMING_SNAKE_CASE_ ( ) -> Dict: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
338
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> List[str]: lowerCAmelCase = len(snake_case__ ) for i in range(length - 1 ): lowerCAmelCase = i for k in range(i + 1 , snake_case__ ): if collection[k] < collection[least]: lowerCAmelCase = k if least != i: lowerCAmelCase , lowerCAmelCase = (collection[i], collection[least]) return collection if __name__ == "__main__": lowercase__ : Optional[int] = input('''Enter numbers separated by a comma:\n''').strip() lowercase__ : str = [int(item) for item in user_input.split(''',''')] print(selection_sort(unsorted))
338
1
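For clarity, a readable reference version of the selection sort above (O(n^2) comparisons, at most n-1 swaps), with a quick sanity check against Python's built-in sorted(); the descriptive names here are mine, not the dataset's.

import random

def selection_sort(collection):
    # Repeatedly select the minimum of the unsorted suffix and swap it into place.
    for i in range(len(collection) - 1):
        least = i
        for k in range(i + 1, len(collection)):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[i], collection[least] = collection[least], collection[i]
    return collection

data = random.sample(range(100), 20)
assert selection_sort(list(data)) == sorted(data)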
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> int: if not isinstance(snake_case__ , snake_case__ ) or number < 0: raise ValueError('''Input must be a non-negative integer''' ) lowerCAmelCase = 0 while number: # This way we arrive at the next set bit (next 1) instead of looping # through each bit and checking for 1s; the loop therefore runs once # per set bit rather than 32 times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
338
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class lowercase_ : """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=19 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.0_2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , ) ->Union[str, Any]: lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_input_mask lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope def SCREAMING_SNAKE_CASE_ ( self ) ->Any: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: lowerCAmelCase = EsmConfig( vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__SCREAMING_SNAKE_CASE , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , ) return config def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Tuple: lowerCAmelCase = EsmForProteinFolding(config=__SCREAMING_SNAKE_CASE ).float() 
model.to(__SCREAMING_SNAKE_CASE ) model.eval() lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = model(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) ) def SCREAMING_SNAKE_CASE_ ( self ) ->int: lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowercase_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = False UpperCAmelCase_ : Dict = (EsmForProteinFolding,) if is_torch_available() else () UpperCAmelCase_ : List[Any] = () UpperCAmelCase_ : Tuple = {} if is_torch_available() else {} UpperCAmelCase_ : List[str] = False def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: lowerCAmelCase = EsmFoldModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self ) ->Any: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) @unittest.skip('''Does not support attention outputs''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: pass @unittest.skip def SCREAMING_SNAKE_CASE_ ( self ) ->Any: pass @unittest.skip('''Esm does not support embedding resizing''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: pass @unittest.skip('''Esm does not support embedding resizing''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->str: pass @unittest.skip('''ESMFold does not support passing input embeds!''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->str: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: pass @unittest.skip('''ESMFold does not output hidden states in the normal way.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: pass @unittest.skip('''ESMfold does not output hidden states in the normal way.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: pass @unittest.skip('''ESMFold only has one output format.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: pass @unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: pass @unittest.skip('''ESMFold does not support input chunking.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: pass @unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def 
SCREAMING_SNAKE_CASE_ ( self ) ->str: pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Any: pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->str: pass @unittest.skip('''ESMFold doesn\'t support data parallel.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->str: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: pass @require_torch class lowercase_ ( UpperCamelCase_ ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE_ ( self ) ->str: lowerCAmelCase = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float() model.eval() lowerCAmelCase = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )['''positions'''] lowerCAmelCase = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
338
1
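A brief check of Kernighan's trick used in the bit-count function above: n &= n - 1 clears the lowest set bit, so the loop runs once per 1-bit instead of once per bit position. The result is compared against Python's own binary representation.

def count_set_bits(number: int) -> int:
    count = 0
    while number:
        number &= number - 1   # drop the lowest set bit
        count += 1
    return count

for n in (0, 1, 0b101101, 2**31 - 1):
    assert count_set_bits(n) == bin(n).count("1")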
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL lowercase__ : Optional[int] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Tuple[int, int]: def constraint_to_multiple_of(snake_case__ , snake_case__ , snake_case__=0 , snake_case__=None ): lowerCAmelCase = round(val / multiple ) * multiple if max_val is not None and x > max_val: lowerCAmelCase = math.floor(val / multiple ) * multiple if x < min_val: lowerCAmelCase = math.ceil(val / multiple ) * multiple return x lowerCAmelCase = (output_size, output_size) if isinstance(snake_case__ , snake_case__ ) else output_size lowerCAmelCase , lowerCAmelCase = get_image_size(snake_case__ ) lowerCAmelCase , lowerCAmelCase = output_size # determine new height and width lowerCAmelCase = output_height / input_height lowerCAmelCase = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width lowerCAmelCase = scale_width else: # fit height lowerCAmelCase = scale_height lowerCAmelCase = constraint_to_multiple_of(scale_height * input_height , multiple=snake_case__ ) lowerCAmelCase = constraint_to_multiple_of(scale_width * input_width , multiple=snake_case__ ) return (new_height, new_width) class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = ["""pixel_values"""] def __init__( self , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = 1 / 255 , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) ->None: super().__init__(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = size if size is not None else {'''height''': 384, '''width''': 384} lowerCAmelCase = get_size_dict(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = do_resize lowerCAmelCase = size lowerCAmelCase = keep_aspect_ratio lowerCAmelCase = ensure_multiple_of lowerCAmelCase = resample lowerCAmelCase = do_rescale lowerCAmelCase = rescale_factor lowerCAmelCase = do_normalize lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) ->np.ndarray: lowerCAmelCase = get_size_dict(__SCREAMING_SNAKE_CASE ) if "height" not in size or "width" not in size: raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}" ) lowerCAmelCase = get_resize_output_image_size( __SCREAMING_SNAKE_CASE , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=__SCREAMING_SNAKE_CASE , multiple=__SCREAMING_SNAKE_CASE , ) return resize(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) ->Any: return rescale(__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) ->np.ndarray: return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE , ) ->PIL.Image.Image: lowerCAmelCase = do_resize if do_resize is not None else self.do_resize lowerCAmelCase = size if size is not None else self.size lowerCAmelCase = get_size_dict(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio lowerCAmelCase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of lowerCAmelCase = resample if resample is not None else self.resample lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase = image_mean if image_mean is not None else self.image_mean lowerCAmelCase = image_std if image_std is not None else self.image_std lowerCAmelCase = make_list_of_images(__SCREAMING_SNAKE_CASE ) if not valid_images(__SCREAMING_SNAKE_CASE ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
lowerCAmelCase = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images] if do_resize: lowerCAmelCase = [self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: lowerCAmelCase = [self.rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE ) for image in images] if do_normalize: lowerCAmelCase = [self.normalize(image=__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE ) for image in images] lowerCAmelCase = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images] lowerCAmelCase = {'''pixel_values''': images} return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->Tuple: lowerCAmelCase = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(__SCREAMING_SNAKE_CASE ): lowerCAmelCase = target_sizes.numpy() lowerCAmelCase = [] for idx in range(len(__SCREAMING_SNAKE_CASE ) ): lowerCAmelCase = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(__SCREAMING_SNAKE_CASE ) else: lowerCAmelCase = logits.argmax(dim=1 ) lowerCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
338
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : List[str] = ["""image_processor""", """tokenizer"""] UpperCAmelCase_ : int = """OwlViTImageProcessor""" UpperCAmelCase_ : Any = ("""CLIPTokenizer""", """CLIPTokenizerFast""") def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->Any: lowerCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __SCREAMING_SNAKE_CASE , ) lowerCAmelCase = kwargs.pop('''feature_extractor''' ) lowerCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="max_length" , __SCREAMING_SNAKE_CASE="np" , **__SCREAMING_SNAKE_CASE ) ->int: if text is None and query_images is None and images is None: raise ValueError( '''You have to specify at least one text or query image or image. All three cannot be none.''' ) if text is not None: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or (isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not isinstance(text[0] , __SCREAMING_SNAKE_CASE )): lowerCAmelCase = [self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )] elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(text[0] , __SCREAMING_SNAKE_CASE ): lowerCAmelCase = [] # Maximum number of queries across batch lowerCAmelCase = max([len(__SCREAMING_SNAKE_CASE ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__SCREAMING_SNAKE_CASE ) != max_num_queries: lowerCAmelCase = t + [''' '''] * (max_num_queries - len(__SCREAMING_SNAKE_CASE )) lowerCAmelCase = self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) encodings.append(__SCREAMING_SNAKE_CASE ) else: raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' ) if return_tensors == "np": lowerCAmelCase = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) lowerCAmelCase = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp lowerCAmelCase = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) lowerCAmelCase = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch lowerCAmelCase = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 ) lowerCAmelCase = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf 
lowerCAmelCase = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) lowerCAmelCase = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) else: raise ValueError('''Target return tensor type could not be returned''' ) lowerCAmelCase = BatchEncoding() lowerCAmelCase = input_ids lowerCAmelCase = attention_mask if query_images is not None: lowerCAmelCase = BatchEncoding() lowerCAmelCase = self.image_processor( __SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).pixel_values lowerCAmelCase = query_pixel_values if images is not None: lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if text is not None and images is not None: lowerCAmelCase = image_features.pixel_values return encoding elif query_images is not None and images is not None: lowerCAmelCase = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__SCREAMING_SNAKE_CASE ) , tensor_type=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Optional[int]: return self.image_processor.post_process(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Any: return self.image_processor.post_process_object_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Tuple: return self.image_processor.post_process_image_guided_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->str: return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->List[Any]: return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) @property def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __SCREAMING_SNAKE_CASE , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE_ ( self ) ->int: warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __SCREAMING_SNAKE_CASE , ) return self.image_processor
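# Hedged usage sketch (not part of the original file): zero-shot object detection with
# the OwlViT processor above, assuming the public "google/owlvit-base-patch32" checkpoint.
import requests
import torch
from PIL import Image
from transformers import OwlViTForObjectDetection, OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
# Nested text queries are padded to the max query count per batch, as in __call__ above.
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# Boxes and scores come back via the post-processing hook forwarded by the class above.
results = processor.post_process_object_detection(outputs, threshold=0.1, target_sizes=torch.tensor([image.size[::-1]]))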
338
1
import argparse import logging import os import sys import numpy as np import onnxruntime import torch from bart_onnx.generation_onnx import BARTBeamSearchGenerator from bart_onnx.reduce_onnx_size import remove_dup_initializers import transformers from transformers import BartForConditionalGeneration, BartTokenizer logging.basicConfig( format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''', datefmt='''%Y-%m-%d %H:%M:%S''', level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(), stream=sys.stdout, ) lowercase__ : Optional[int] = logging.getLogger(__name__) lowercase__ : int = {'''facebook/bart-base''': BartForConditionalGeneration} lowercase__ : List[str] = {'''facebook/bart-base''': BartTokenizer} def SCREAMING_SNAKE_CASE_ ( ) -> int: lowerCAmelCase = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' ) parser.add_argument( '''--validation_file''' , type=snake_case__ , default=snake_case__ , help='''A csv or a json file containing the validation data.''' ) parser.add_argument( '''--max_length''' , type=snake_case__ , default=5 , help='''The maximum total input sequence length after tokenization.''' , ) parser.add_argument( '''--num_beams''' , type=snake_case__ , default=snake_case__ , help=( '''Number of beams to use for evaluation. This argument will be ''' '''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.''' ) , ) parser.add_argument( '''--model_name_or_path''' , type=snake_case__ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case__ , ) parser.add_argument( '''--config_name''' , type=snake_case__ , default=snake_case__ , help='''Pretrained config name or path if not the same as model_name''' , ) parser.add_argument( '''--device''' , type=snake_case__ , default='''cpu''' , help='''Device where the model will be run''' , ) parser.add_argument('''--output_file_path''' , type=snake_case__ , default=snake_case__ , help='''Where to store the final ONNX file.''' ) lowerCAmelCase = parser.parse_args() return args def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__="cpu" ) -> List[str]: lowerCAmelCase = model_dict[model_name].from_pretrained(snake_case__ ).to(snake_case__ ) lowerCAmelCase = tokenizer_dict[model_name].from_pretrained(snake_case__ ) if model_name in ["facebook/bart-base"]: lowerCAmelCase = 0 lowerCAmelCase = None lowerCAmelCase = 0 return huggingface_model, tokenizer def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Tuple: model.eval() lowerCAmelCase = None lowerCAmelCase = torch.jit.script(BARTBeamSearchGenerator(snake_case__ ) ) with torch.no_grad(): lowerCAmelCase = '''My friends are cool but they eat too many carbs.''' lowerCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_0_2_4 , return_tensors='''pt''' ).to(model.device ) lowerCAmelCase = model.generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , num_beams=snake_case__ , max_length=snake_case__ , early_stopping=snake_case__ , decoder_start_token_id=model.config.decoder_start_token_id , ) torch.onnx.export( snake_case__ , ( inputs['''input_ids'''], inputs['''attention_mask'''], num_beams, max_length, model.config.decoder_start_token_id, ) , snake_case__ , opset_version=1_4 , input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''] , output_names=['''output_ids'''] , dynamic_axes={ '''input_ids''': {0: 
'''batch''', 1: '''seq'''}, '''output_ids''': {0: '''batch''', 1: '''seq_out'''}, } , example_outputs=snake_case__ , ) logger.info('''Model exported to {}'''.format(snake_case__ ) ) lowerCAmelCase = remove_dup_initializers(os.path.abspath(snake_case__ ) ) logger.info('''Deduplicated and optimized model written to {}'''.format(snake_case__ ) ) lowerCAmelCase = onnxruntime.InferenceSession(snake_case__ ) lowerCAmelCase = ort_sess.run( snake_case__ , { '''input_ids''': inputs['''input_ids'''].cpu().numpy(), '''attention_mask''': inputs['''attention_mask'''].cpu().numpy(), '''num_beams''': np.array(snake_case__ ), '''max_length''': np.array(snake_case__ ), '''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ), } , ) np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 ) logger.info('''Model outputs from torch and ONNX Runtime are similar.''' ) logger.info('''Success.''' ) def SCREAMING_SNAKE_CASE_ ( ) -> Any: lowerCAmelCase = parse_args() lowerCAmelCase = 5 lowerCAmelCase = 4 # Make one log on every process with the configuration for debugging. logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , ) logger.setLevel(logging.INFO ) transformers.utils.logging.set_verbosity_error() lowerCAmelCase = torch.device(args.device ) lowerCAmelCase , lowerCAmelCase = load_model_tokenizer(args.model_name_or_path , snake_case__ ) if model.config.decoder_start_token_id is None: raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' ) model.to(snake_case__ ) if args.max_length: lowerCAmelCase = args.max_length if args.num_beams: lowerCAmelCase = args.num_beams if args.output_file_path: lowerCAmelCase = args.output_file_path else: lowerCAmelCase = '''BART.onnx''' logger.info('''Exporting model to ONNX''' ) export_and_validate_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) if __name__ == "__main__": main()
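# Hedged usage sketch (not part of the original script): the exporter above is a CLI.
# Assuming the file is saved as run_onnx_exporter.py (the name is an assumption), a
# typical invocation looks like:
#
#   python run_onnx_exporter.py \
#       --model_name_or_path facebook/bart-base \
#       --num_beams 4 \
#       --max_length 5 \
#       --output_file_path bart.onnx
#
# The script scripts BARTBeamSearchGenerator with torch.jit, exports it at ONNX opset 14,
# deduplicates initializers, and cross-checks ONNX Runtime output against torch generate()
# within rtol/atol of 1e-3.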
338
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowercase__ : List[Any] = logging.get_logger(__name__) lowercase__ : Optional[Any] = {'''vocab_file''': '''spiece.model'''} lowercase__ : Optional[int] = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', } } lowercase__ : Any = { '''albert-base-v1''': 5_1_2, '''albert-large-v1''': 5_1_2, '''albert-xlarge-v1''': 5_1_2, '''albert-xxlarge-v1''': 5_1_2, '''albert-base-v2''': 5_1_2, '''albert-large-v2''': 5_1_2, '''albert-xlarge-v2''': 5_1_2, '''albert-xxlarge-v2''': 5_1_2, } lowercase__ : Tuple = '''▁''' class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : Dict = VOCAB_FILES_NAMES UpperCAmelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) ->None: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
lowerCAmelCase = ( AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE , normalized=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token ) lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) lowerCAmelCase = do_lower_case lowerCAmelCase = remove_space lowerCAmelCase = keep_accents lowerCAmelCase = vocab_file lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__SCREAMING_SNAKE_CASE ) @property def SCREAMING_SNAKE_CASE_ ( self ) ->Any: return len(self.sp_model ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: lowerCAmelCase = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) ->int: lowerCAmelCase = self.__dict__.copy() lowerCAmelCase = None return state def __setstate__( self , __SCREAMING_SNAKE_CASE ) ->Tuple: lowerCAmelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowerCAmelCase = {} lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Any: if self.remove_space: lowerCAmelCase = ''' '''.join(inputs.strip().split() ) else: lowerCAmelCase = inputs lowerCAmelCase = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' ) if not self.keep_accents: lowerCAmelCase = unicodedata.normalize('''NFKD''' , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = ''''''.join([c for c in outputs if not unicodedata.combining(__SCREAMING_SNAKE_CASE )] ) if self.do_lower_case: lowerCAmelCase = outputs.lower() return outputs def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->List[str]: lowerCAmelCase = self.preprocess_text(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = [] for piece in pieces: if len(__SCREAMING_SNAKE_CASE ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit(): lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(__SCREAMING_SNAKE_CASE , '''''' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowerCAmelCase = cur_pieces[1:] else: lowerCAmelCase = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__SCREAMING_SNAKE_CASE ) else: new_pieces.append(__SCREAMING_SNAKE_CASE ) return new_pieces def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->int: return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->int: return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Optional[int]: lowerCAmelCase = [] lowerCAmelCase = '''''' lowerCAmelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " 
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token lowerCAmelCase = True lowerCAmelCase = [] else: current_sub_tokens.append(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = False out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) return out_string.strip() def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->List[int]: lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ) ->List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE ) if token_ids_a is not None: return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->List[int]: lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->Tuple[str]: if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return lowerCAmelCase = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi: lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
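# Hedged usage sketch (not part of the original file): round-tripping text through the
# SentencePiece-backed ALBERT tokenizer above, using the public "albert-base-v2"
# checkpoint listed in PRETRAINED_VOCAB_FILES_MAP.
from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
tokens = tokenizer.tokenize("Hello, world!")   # SentencePiece pieces, e.g. '▁hello'
encoded = tokenizer("Hello, world!")           # wraps ids in [CLS] ... [SEP] per the special-token logic above
decoded = tokenizer.decode(encoded["input_ids"], skip_special_tokens=True)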
338
1
import os import unittest from transformers import LxmertTokenizer, LxmertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase_ ( UpperCamelCase_ , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = LxmertTokenizer UpperCAmelCase_ : Dict = LxmertTokenizerFast UpperCAmelCase_ : Dict = True UpperCAmelCase_ : Union[str, Any] = True def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: super().setUp() lowerCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Dict: lowerCAmelCase = '''UNwant\u00E9d,running''' lowerCAmelCase = '''unwanted, running''' return input_text, output_text def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: lowerCAmelCase = self.tokenizer_class(self.vocab_file ) lowerCAmelCase = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [7, 4, 5, 10, 8, 9] ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: if not self.test_rust_tokenizer: return lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_rust_tokenizer() lowerCAmelCase = '''I was born in 92000, and this is falsé.''' lowerCAmelCase = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.get_rust_tokenizer() lowerCAmelCase = tokenizer.encode(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
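# Hedged usage sketch (not part of the original file): the suite above is a standard
# unittest/pytest module. Assuming it lives at
# tests/models/lxmert/test_tokenization_lxmert.py (the path is an assumption), it runs with:
#
#   python -m pytest tests/models/lxmert/test_tokenization_lxmert.py -v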
338
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = (DEISMultistepScheduler,) UpperCAmelCase_ : int = (("""num_inference_steps""", 25),) def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->str: lowerCAmelCase = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''solver_order''': 2, } config.update(**__SCREAMING_SNAKE_CASE ) return config def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ) ->Tuple: lowerCAmelCase = dict(self.forward_default_kwargs ) lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.dummy_sample lowerCAmelCase = 0.1 * sample lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) # copy over dummy past residuals lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE ) new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) # copy over dummy past residuals lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] lowerCAmelCase , lowerCAmelCase = sample, sample for t in range(__SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ): lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample lowerCAmelCase = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: pass def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ) ->List[Any]: lowerCAmelCase = dict(self.forward_default_kwargs ) lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.dummy_sample lowerCAmelCase = 0.1 * sample lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) # copy over dummy past residuals (must be after setting timesteps) lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE ) # copy over dummy past residuals new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) # copy over dummy past residual (must be after setting timesteps) lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample 
lowerCAmelCase = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->List[Any]: if scheduler is None: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = 10 lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample return sample def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: lowerCAmelCase = dict(self.forward_default_kwargs ) lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE ) for scheduler_class in self.scheduler_classes: lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.dummy_sample lowerCAmelCase = 0.1 * sample if num_inference_steps is not None and hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ): scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) elif num_inference_steps is not None and not hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ): lowerCAmelCase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] lowerCAmelCase = scheduler.timesteps[5] lowerCAmelCase = scheduler.timesteps[6] lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE_ ( self ) ->int: # make sure that iterating over schedulers with same config names gives same results # for defaults lowerCAmelCase = DEISMultistepScheduler(**self.get_scheduler_config() ) lowerCAmelCase = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) lowerCAmelCase = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( 
self ) ->int: self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , algorithm_type='''deis''' , solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , ) lowerCAmelCase = self.full_loop( solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , ) assert not torch.isnan(__SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers" def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE ) self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=__SCREAMING_SNAKE_CASE , time_step=0 ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: lowerCAmelCase = self.full_loop() lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: lowerCAmelCase = self.full_loop(prediction_type='''v_prediction''' ) lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3 def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config(thresholding=__SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 ) lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = 10 lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample assert sample.dtype == torch.floataa
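# Hedged usage sketch (not part of the original file): swapping the scheduler under test
# into a real pipeline, assuming the public "runwayml/stable-diffusion-v1-5" checkpoint
# (an assumption; any diffusers pipeline with a compatible scheduler config works).
from diffusers import DEISMultistepScheduler, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# from_config interchange is exactly what test_full_loop checks between DEIS/DPM/UniPC.
pipe.scheduler = DEISMultistepScheduler.from_config(pipe.scheduler.config)
image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]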
338
1
# Imports import numpy as np class lowercase_ : """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ) ->int: self.set_matricies(red=__SCREAMING_SNAKE_CASE , green=__SCREAMING_SNAKE_CASE , blue=__SCREAMING_SNAKE_CASE , red_edge=__SCREAMING_SNAKE_CASE , nir=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ) ->List[Any]: if red is not None: lowerCAmelCase = red if green is not None: lowerCAmelCase = green if blue is not None: lowerCAmelCase = blue if red_edge is not None: lowerCAmelCase = red_edge if nir is not None: lowerCAmelCase = nir return True def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE="" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ) ->Dict: self.set_matricies(red=__SCREAMING_SNAKE_CASE , green=__SCREAMING_SNAKE_CASE , blue=__SCREAMING_SNAKE_CASE , red_edge=__SCREAMING_SNAKE_CASE , nir=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = { '''ARVI2''': self.arvaa, '''CCCI''': self.ccci, '''CVI''': self.cvi, '''GLI''': self.gli, '''NDVI''': self.ndvi, '''BNDVI''': self.bndvi, '''redEdgeNDVI''': self.red_edge_ndvi, '''GNDVI''': self.gndvi, '''GBNDVI''': self.gbndvi, '''GRNDVI''': self.grndvi, '''RBNDVI''': self.rbndvi, '''PNDVI''': self.pndvi, '''ATSAVI''': self.atsavi, '''BWDRVI''': self.bwdrvi, '''CIgreen''': self.ci_green, '''CIrededge''': self.ci_rededge, '''CI''': self.ci, '''CTVI''': self.ctvi, '''GDVI''': self.gdvi, '''EVI''': self.evi, '''GEMI''': self.gemi, '''GOSAVI''': self.gosavi, '''GSAVI''': self.gsavi, '''Hue''': self.hue, '''IVI''': self.ivi, '''IPVI''': self.ipvi, '''I''': self.i, '''RVI''': self.rvi, '''MRVI''': self.mrvi, '''MSAVI''': self.m_savi, '''NormG''': self.norm_g, '''NormNIR''': self.norm_nir, '''NormR''': self.norm_r, '''NGRDI''': self.ngrdi, '''RI''': self.ri, '''S''': self.s, '''IF''': self._if, '''DVI''': self.dvi, '''TVI''': self.tvi, '''NDRE''': self.ndre, } try: return funcs[index]() except KeyError: print('''Index not in the list!''' ) return False def SCREAMING_SNAKE_CASE_ ( self ) ->Any: return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red))) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def SCREAMING_SNAKE_CASE_ ( self ) ->int: return self.nir * (self.red / (self.green**2)) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: return (self.nir - self.red) / (self.nir + self.red) def SCREAMING_SNAKE_CASE_ ( self ) ->str: return (self.nir - self.blue) / (self.nir + self.blue) def SCREAMING_SNAKE_CASE_ ( self ) ->str: return (self.redEdge - self.red) / (self.redEdge + self.red) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: return (self.nir - self.green) / (self.nir + self.green) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def SCREAMING_SNAKE_CASE_ ( self ) 
->Optional[int]: return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def SCREAMING_SNAKE_CASE_ ( self ) ->int: return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0.0_8 , __SCREAMING_SNAKE_CASE=1.2_2 , __SCREAMING_SNAKE_CASE=0.0_3 ) ->List[str]: return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: return (self.nir / self.green) - 1 def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: return (self.nir / self.redEdge) - 1 def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: return (self.red - self.blue) / self.red def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: lowerCAmelCase = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: return self.nir - self.green def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def SCREAMING_SNAKE_CASE_ ( self ) ->str: lowerCAmelCase = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0.1_6 ) ->List[Any]: return (self.nir - self.green) / (self.nir + self.green + y) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0.5 ) ->Optional[Any]: return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: return np.arctan( ((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ) ->Tuple: return (self.nir - b) / (a * self.red) def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: return (self.red + self.green + self.blue) / 3_0.5 def SCREAMING_SNAKE_CASE_ ( self ) ->int: return self.nir / self.red def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: return (self.rvi() - 1) / (self.rvi() + 1) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: return self.green / (self.nir + self.red + self.green) def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: return self.nir / (self.nir + self.red + self.green) def SCREAMING_SNAKE_CASE_ ( self ) ->int: return self.red / (self.nir + self.red + self.green) def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: return (self.green - self.red) / (self.green + self.red) def SCREAMING_SNAKE_CASE_ ( self ) ->Any: return (self.red - self.green) / (self.red + self.green) def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: lowerCAmelCase = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) lowerCAmelCase = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def SCREAMING_SNAKE_CASE_ ( self ) ->int: return self.nir / self.red def SCREAMING_SNAKE_CASE_ ( self ) 
->List[Any]: return (self.ndvi() + 0.5) ** (1 / 2) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: return (self.nir - self.redEdge) / (self.nir + self.redEdge)
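# Hedged usage sketch (not part of the original file): the NDVI branch above computes
# (nir - red) / (nir + red); a self-contained check on a tiny synthetic scene:
import numpy as np

red = np.array([[50.0, 60.0], [70.0, 80.0]])
nir = np.array([[200.0, 190.0], [180.0, 170.0]])
ndvi = (nir - red) / (nir + red)  # values in [-1, 1]; vegetated pixels trend toward 1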
338
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowercase_ ( unittest.TestCase ): """simple docstring""" @property def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: torch.manual_seed(0 ) lowerCAmelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def SCREAMING_SNAKE_CASE_ ( self ) ->int: lowerCAmelCase = self.dummy_uncond_unet lowerCAmelCase = KarrasVeScheduler() lowerCAmelCase = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' , return_dict=__SCREAMING_SNAKE_CASE )[0] lowerCAmelCase = image[0, -3:, -3:, -1] lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCAmelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class lowercase_ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: lowerCAmelCase = '''google/ncsnpp-celebahq-256''' lowerCAmelCase = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = KarrasVeScheduler() lowerCAmelCase = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe(num_inference_steps=20 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) lowerCAmelCase = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
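# Hedged usage sketch (not part of the original file): unconditional sampling with the
# pipeline under test, using the "google/ncsnpp-celebahq-256" checkpoint from the slow
# test above.
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
image = pipe(num_inference_steps=20, generator=torch.manual_seed(0)).images[0]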
338
1
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class lowercase_ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: lowerCAmelCase = 1 lowerCAmelCase = 3 lowerCAmelCase = (32, 32) lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE ) return image @property def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: torch.manual_seed(0 ) lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) return model @property def SCREAMING_SNAKE_CASE_ ( self ) ->Any: torch.manual_seed(0 ) lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) return model @property def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: torch.manual_seed(0 ) lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModel(__SCREAMING_SNAKE_CASE ) @property def SCREAMING_SNAKE_CASE_ ( self ) ->str: def extract(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): class lowercase_ : """simple docstring""" def __init__( self ) ->Any: lowerCAmelCase = torch.ones([0] ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->List[str]: self.pixel_values.to(__SCREAMING_SNAKE_CASE ) return self return Out() return extract def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase = self.dummy_cond_unet lowerCAmelCase = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , ) lowerCAmelCase = self.dummy_vae lowerCAmelCase = self.dummy_text_encoder lowerCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # make sure here that pndm scheduler skips prk lowerCAmelCase = StableDiffusionPipeline( unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , vae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , ) lowerCAmelCase = sd_pipe.to(__SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = '''A painting of a squirrel eating a burger''' lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE 
).manual_seed(0 ) lowerCAmelCase = sd_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' ) lowerCAmelCase = output.images lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 ) lowerCAmelCase = sd_pipe( [prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=__SCREAMING_SNAKE_CASE , )[0] lowerCAmelCase = image[0, -3:, -3:, -1] lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase = self.dummy_cond_unet lowerCAmelCase = PNDMScheduler(skip_prk_steps=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.dummy_vae lowerCAmelCase = self.dummy_text_encoder lowerCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # make sure here that pndm scheduler skips prk lowerCAmelCase = StableDiffusionPipeline( unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , vae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , ) lowerCAmelCase = sd_pipe.to(__SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = '''A painting of a squirrel eating a burger''' lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 ) lowerCAmelCase = sd_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' ) lowerCAmelCase = output.images lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 ) lowerCAmelCase = sd_pipe( [prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=__SCREAMING_SNAKE_CASE , )[0] lowerCAmelCase = image[0, -3:, -3:, -1] lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: lowerCAmelCase = StableDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-lms-pipe''' , safety_checker=__SCREAMING_SNAKE_CASE ) assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert isinstance(pipe.scheduler , __SCREAMING_SNAKE_CASE ) assert pipe.safety_checker is None lowerCAmelCase = pipe('''example prompt''' , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = StableDiffusionPipeline.from_pretrained(__SCREAMING_SNAKE_CASE ) # sanity check that the pipeline still works assert pipe.safety_checker is None lowerCAmelCase = 
pipe('''example prompt''' , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->int: lowerCAmelCase = self.dummy_cond_unet lowerCAmelCase = PNDMScheduler(skip_prk_steps=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.dummy_vae lowerCAmelCase = self.dummy_text_encoder lowerCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # put models in fp16 lowerCAmelCase = unet.half() lowerCAmelCase = vae.half() lowerCAmelCase = bert.half() # make sure here that pndm scheduler skips prk lowerCAmelCase = StableDiffusionPipeline( unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , vae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , ) lowerCAmelCase = sd_pipe.to(__SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = '''A painting of a squirrel eating a burger''' lowerCAmelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type='''np''' ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class lowercase_ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self ) ->int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: lowerCAmelCase = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) lowerCAmelCase = sd_pipe.to(__SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = ( '''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle''' ''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with''' ''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and''' ''' children from bahnhof zoo, detailed ''' ) lowerCAmelCase = 4003660346 lowerCAmelCase = 7 # without safety guidance (sld_guidance_scale = 0) lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = sd_pipe( [prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , ) lowerCAmelCase = output.images lowerCAmelCase = image[0, -3:, -3:, -1] lowerCAmelCase = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 # without safety guidance (strong configuration) lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = sd_pipe( [prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) lowerCAmelCase = output.images lowerCAmelCase = image[0, -3:, -3:, -1] lowerCAmelCase = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9] assert image.shape == (1, 512, 512, 3) assert 
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: lowerCAmelCase = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) lowerCAmelCase = sd_pipe.to(__SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = '''padme amidala taking a bath artwork, safe for work, no nudity''' lowerCAmelCase = 2734971755 lowerCAmelCase = 7 lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = sd_pipe( [prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , ) lowerCAmelCase = output.images lowerCAmelCase = image[0, -3:, -3:, -1] lowerCAmelCase = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = sd_pipe( [prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) lowerCAmelCase = output.images lowerCAmelCase = image[0, -3:, -3:, -1] lowerCAmelCase = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: lowerCAmelCase = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' ) lowerCAmelCase = sd_pipe.to(__SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = ( '''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.''' ''' leyendecker''' ) lowerCAmelCase = 1044355234 lowerCAmelCase = 12 lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = sd_pipe( [prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , ) lowerCAmelCase = output.images lowerCAmelCase = image[0, -3:, -3:, -1] lowerCAmelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7 lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = sd_pipe( [prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) lowerCAmelCase = output.images lowerCAmelCase = image[0, -3:, -3:, -1] lowerCAmelCase = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
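# Hedged usage sketch (not part of the original file): Safe Latent Diffusion with the
# pipeline under test, assuming the public "runwayml/stable-diffusion-v1-5" checkpoint
# used in the nightly tests above.
import torch
from diffusers import StableDiffusionPipelineSafe

pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5")
image = pipe(
    "portrait photo of an astronaut",
    generator=torch.manual_seed(0),
    sld_guidance_scale=2000,  # safety-guidance strength, as exercised above; 0 disables it
).images[0]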
338
from typing import Dict

import numpy as np

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException

if is_tf_available():
    import tensorflow as tf

    from ..tf_utils import stable_softmax

if is_torch_available():
    import torch

lowercase__ : Dict = logging.get_logger(__name__)

@add_end_docstrings(
    UpperCamelCase_ ,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first
            resulting token will be used (with a warning, and that might be slower).
    """ , )
class lowercase_ ( UpperCamelCase_ ):
    """simple docstring"""

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->np.ndarray:
        if self.framework == "tf":
            lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            lowerCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE )
        else:
            raise ValueError('''Unsupported framework''' )
        return masked_index

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->np.ndarray:
        lowerCAmelCase = self.get_masked_index(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                '''fill-mask''' , self.model.base_model_prefix , F"No mask_token ({self.tokenizer.mask_token}) found on the input" , )

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->str:
        if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->Dict[str, GenericTensor]:
        if return_tensors is None:
            lowerCAmelCase = self.framework
        lowerCAmelCase = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
        self.ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE )
        return model_inputs

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Tuple:
        lowerCAmelCase = self.model(**__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = model_inputs['''input_ids''']
        return model_outputs

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=None ) ->str:
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            lowerCAmelCase = target_ids.shape[0]
        lowerCAmelCase = model_outputs['''input_ids'''][0]
        lowerCAmelCase = model_outputs['''logits''']

        if self.framework == "tf":
            lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]

            lowerCAmelCase = outputs.numpy()

            lowerCAmelCase = outputs[0, masked_index, :]
            lowerCAmelCase = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
            if target_ids is not None:
                lowerCAmelCase = tf.gather_nd(tf.squeeze(__SCREAMING_SNAKE_CASE , 0 ) , target_ids.reshape(-1 , 1 ) )
                lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE , 0 )

            lowerCAmelCase = tf.math.top_k(__SCREAMING_SNAKE_CASE , k=__SCREAMING_SNAKE_CASE )
            lowerCAmelCase , lowerCAmelCase = topk.values.numpy(), topk.indices.numpy()
        else:
            lowerCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample

            lowerCAmelCase = outputs[0, masked_index, :]
            lowerCAmelCase = logits.softmax(dim=-1 )
            if target_ids is not None:
                lowerCAmelCase = probs[..., target_ids]

            lowerCAmelCase , lowerCAmelCase = probs.topk(__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = []
        lowerCAmelCase = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            lowerCAmelCase = []
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                lowerCAmelCase = input_ids.numpy().copy()
                if target_ids is not None:
                    lowerCAmelCase = target_ids[p].tolist()

                lowerCAmelCase = p
                # Filter padding out:
                lowerCAmelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                lowerCAmelCase = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
                lowerCAmelCase = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
                row.append(__SCREAMING_SNAKE_CASE )
            result.append(__SCREAMING_SNAKE_CASE )
        if single_mask:
            return result[0]
        return result

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]:
        if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
            lowerCAmelCase = [targets]
        try:
            lowerCAmelCase = self.tokenizer.get_vocab()
        except Exception:
            lowerCAmelCase = {}
        lowerCAmelCase = []
        for target in targets:
            lowerCAmelCase = vocab.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            if id_ is None:
                lowerCAmelCase = self.tokenizer(
                    __SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , max_length=1 , truncation=__SCREAMING_SNAKE_CASE , )['''input_ids''']
                if len(__SCREAMING_SNAKE_CASE ) == 0:
                    logger.warning(
                        F"The specified target token `{target}` does not exist in the model vocabulary. "
                        '''We cannot replace it with anything meaningful, ignoring it''' )
                    continue
                lowerCAmelCase = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    F"The specified target token `{target}` does not exist in the model vocabulary. "
                    F"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
            target_ids.append(id_ )
        lowerCAmelCase = list(set(__SCREAMING_SNAKE_CASE ) )
        if len(__SCREAMING_SNAKE_CASE ) == 0:
            raise ValueError('''At least one target must be provided when passed.''' )
        lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
        return target_ids

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ) ->Dict:
        lowerCAmelCase = {}
        if targets is not None:
            lowerCAmelCase = self.get_target_ids(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            lowerCAmelCase = target_ids
        if top_k is not None:
            lowerCAmelCase = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                '''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
        return {}, {}, postprocess_params

    def __call__( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
        lowerCAmelCase = super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) == 1:
            return outputs[0]
        return outputs
338
1
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> int:
    # Validate the type first so non-integer inputs fail with a clear error
    # (the original body compared an undefined name `a` with 0 and passed the
    # value itself as the second argument of isinstance(), which cannot work).
    if not isinstance(snake_case__ , int ):
        raise TypeError('''Input value must be a \'int\' type''' )
    if snake_case__ < 0:
        raise ValueError('''Input value must be a positive integer''' )
    return bin(snake_case__ ).count('''1''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
338
from typing import TYPE_CHECKING

from ...utils import _LazyModule

lowercase__ : int = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}

if TYPE_CHECKING:
    from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
    import sys

    lowercase__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
338
1
import unittest

import numpy as np

def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , ) -> np.ndarray:
    lowerCAmelCase = np.shape(snake_case__ )
    lowerCAmelCase = np.shape(snake_case__ )
    lowerCAmelCase = np.shape(snake_case__ )

    if shape_a[0] != shape_b[0]:
        lowerCAmelCase = (
            '''Expected the same number of rows for A and B. '''
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(snake_case__ )

    if shape_b[1] != shape_c[1]:
        lowerCAmelCase = (
            '''Expected the same number of columns for B and C. '''
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(snake_case__ )

    lowerCAmelCase = pseudo_inv
    if a_inv is None:
        try:
            lowerCAmelCase = np.linalg.inv(snake_case__ )
        except np.linalg.LinAlgError:
            raise ValueError(
                '''Input matrix A is not invertible. Cannot compute Schur complement.''' )

    return mat_c - mat_b.T @ a_inv @ mat_b

class lowercase_ ( unittest.TestCase ):
    """simple docstring"""

    def SCREAMING_SNAKE_CASE_ ( self ) ->None:
        lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
        lowerCAmelCase = np.array([[2, 1], [6, 3]] )

        lowerCAmelCase = schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

        lowerCAmelCase = np.block([[a, b], [b.T, c]] )

        lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE )

        self.assertAlmostEqual(__SCREAMING_SNAKE_CASE , det_a * det_s )

    def SCREAMING_SNAKE_CASE_ ( self ) ->None:
        lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
        lowerCAmelCase = np.array([[2, 1], [6, 3]] )

        with self.assertRaises(__SCREAMING_SNAKE_CASE ):
            schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->None:
        lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
        lowerCAmelCase = np.array([[2, 1, 3], [6, 3, 5]] )

        with self.assertRaises(__SCREAMING_SNAKE_CASE ):
            schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    unittest.main()
338
lowercase__ : Optional[int] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''

def SCREAMING_SNAKE_CASE_ ( ) -> None:
    lowerCAmelCase = input('''Enter message: ''' )
    lowerCAmelCase = input('''Enter key [alphanumeric]: ''' )
    lowerCAmelCase = input('''Encrypt/Decrypt [e/d]: ''' )

    if mode.lower().startswith('''e''' ):
        lowerCAmelCase = '''encrypt'''
        lowerCAmelCase = encrypt_message(snake_case__ , snake_case__ )
    elif mode.lower().startswith('''d''' ):
        lowerCAmelCase = '''decrypt'''
        lowerCAmelCase = decrypt_message(snake_case__ , snake_case__ )

    print(f"\n{mode.title()}ed message:" )
    print(snake_case__ )

def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> str:
    return translate_message(snake_case__ , snake_case__ , '''encrypt''' )

def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> str:
    return translate_message(snake_case__ , snake_case__ , '''decrypt''' )

def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> str:
    lowerCAmelCase = []
    lowerCAmelCase = 0
    lowerCAmelCase = key.upper()

    for symbol in message:
        lowerCAmelCase = LETTERS.find(symbol.upper() )
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index] )
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index] )

            num %= len(snake_case__ )

            if symbol.isupper():
                translated.append(LETTERS[num] )
            elif symbol.islower():
                translated.append(LETTERS[num].lower() )

            key_index += 1
            if key_index == len(snake_case__ ):
                lowerCAmelCase = 0
        else:
            translated.append(snake_case__ )
    return "".join(snake_case__ )

if __name__ == "__main__":
    main()
338
1
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest

class lowercase_ ( UpperCamelCase_ ):
    """simple docstring"""

    UpperCAmelCase_ : Tuple = (CMStochasticIterativeScheduler,)
    UpperCAmelCase_ : Optional[int] = 10

    def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->str:
        lowerCAmelCase = {
            '''num_train_timesteps''': 201,
            '''sigma_min''': 0.0_0_2,
            '''sigma_max''': 8_0.0,
        }

        config.update(**__SCREAMING_SNAKE_CASE )
        return config

    def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        lowerCAmelCase = 10
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = self.scheduler_classes[0](**__SCREAMING_SNAKE_CASE )

        scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = scheduler.timesteps[0]
        lowerCAmelCase = scheduler.timesteps[1]

        lowerCAmelCase = self.dummy_sample
        lowerCAmelCase = 0.1 * sample

        lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
        lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample

        self.assertEqual(output_a.shape , sample.shape )
        self.assertEqual(output_a.shape , output_a.shape )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = 1
        scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = scheduler.timesteps

        lowerCAmelCase = torch.manual_seed(0 )

        lowerCAmelCase = self.dummy_model()
        lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(__SCREAMING_SNAKE_CASE ):
            # 1. scale model input
            lowerCAmelCase = scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

            # 2. predict noise residual
            lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

            # 3. predict previous sample x_t-1
            lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample

            lowerCAmelCase = pred_prev_sample

        lowerCAmelCase = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
        lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )

        assert abs(result_sum.item() - 1_9_2.7_6_1_4 ) < 1e-2
        assert abs(result_mean.item() - 0.2_5_1_0 ) < 1e-3

    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = [106, 0]
        scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = scheduler.timesteps

        lowerCAmelCase = torch.manual_seed(0 )

        lowerCAmelCase = self.dummy_model()
        lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            lowerCAmelCase = scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

            # 2. predict noise residual
            lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

            # 3. predict previous sample x_t-1
            lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample

            lowerCAmelCase = pred_prev_sample

        lowerCAmelCase = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
        lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )

        assert abs(result_sum.item() - 3_4_7.6_3_5_7 ) < 1e-2
        assert abs(result_mean.item() - 0.4_5_2_7 ) < 1e-3

    def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = [39, 30, 12, 15, 0]

        with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''`timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = [39, 30, 12, 1, 0]
        lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )

        with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            __SCREAMING_SNAKE_CASE , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
            scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
338
from collections import defaultdict
from math import ceil, sqrt

def SCREAMING_SNAKE_CASE_ ( snake_case__ = 1_0_0_0_0_0_0 , snake_case__ = 1_0 ) -> int:
    lowerCAmelCase = defaultdict(snake_case__ )

    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            lowerCAmelCase = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            lowerCAmelCase = 1

        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(snake_case__ , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= 1_0 )

if __name__ == "__main__":
    print(f'{solution() = }')
338
1
from dataclasses import dataclass, field
from typing import Optional

@dataclass
class lowercase_ :
    """simple docstring"""

    UpperCAmelCase_ : Optional[str] = field(
        default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be trained."""} )
    UpperCAmelCase_ : Optional[str] = field(
        default="""./""" , metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""} )
    UpperCAmelCase_ : Optional[str] = field(
        default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path of training dataset."""} )
    UpperCAmelCase_ : Optional[str] = field(
        default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
    UpperCAmelCase_ : Optional[int] = field(default=2 , metadata={"""help""": """Batch size for training."""} )
    UpperCAmelCase_ : Optional[int] = field(default=2 , metadata={"""help""": """Batch size for evaluation."""} )
    UpperCAmelCase_ : Optional[float] = field(default=0.1 , metadata={"""help""": """Value of weight decay."""} )
    UpperCAmelCase_ : Optional[int] = field(
        default=10000 , metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""} )
    UpperCAmelCase_ : Optional[float] = field(default=2e-4 , metadata={"""help""": """Learning rate for training."""} )
    UpperCAmelCase_ : Optional[str] = field(default="""cosine""" , metadata={"""help""": """Learning rate scheduler type."""} )
    UpperCAmelCase_ : Optional[int] = field(
        default=750 , metadata={"""help""": """Number of warmup steps in the learning rate schedule."""} )
    UpperCAmelCase_ : Optional[int] = field(
        default=16 , metadata={"""help""": """Number of gradient accumulation steps."""} )
    UpperCAmelCase_ : Optional[bool] = field(
        default=UpperCamelCase_ , metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""} )
    UpperCAmelCase_ : Optional[int] = field(default=50000 , metadata={"""help""": """Maximum number of training steps."""} )
    UpperCAmelCase_ : Optional[int] = field(
        default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
    UpperCAmelCase_ : Optional[int] = field(default=1024 , metadata={"""help""": """Sequence lengths used for training."""} )
    UpperCAmelCase_ : Optional[int] = field(default=1 , metadata={"""help""": """Training seed."""} )
    UpperCAmelCase_ : Optional[int] = field(
        default=1024 ,
        metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""} , )
    UpperCAmelCase_ : Optional[str] = field(
        default=UpperCamelCase_ , metadata={"""help""": """States path if the training should continue from a checkpoint folder."""} )
    UpperCAmelCase_ : Optional[bool] = field(default=UpperCamelCase_ , metadata={"""help""": """If True the data is pretokenized."""} )

@dataclass
class lowercase_ :
    """simple docstring"""

    UpperCAmelCase_ : Optional[str] = field(
        default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
    UpperCAmelCase_ : Optional[str] = field(
        default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
    UpperCAmelCase_ : Optional[int] = field(default=2 , metadata={"""help""": """Batch size used for evaluation."""} )
    UpperCAmelCase_ : Optional[int] = field(
        default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
    UpperCAmelCase_ : Optional[int] = field(default=1024 , metadata={"""help""": """Length of sequences to be evaluated."""} )
    UpperCAmelCase_ : Optional[int] = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )

@dataclass
class lowercase_ :
    """simple docstring"""

    UpperCAmelCase_ : Optional[str] = field(
        default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
    UpperCAmelCase_ : Optional[int] = field(default=UpperCamelCase_ , metadata={"""help""": """Number of workers used for code evaluation."""} )
    UpperCAmelCase_ : Optional[int] = field(
        default=UpperCamelCase_ ,
        metadata={"""help""": """The number of human-eval tasks to run. If not included all tasks are evaluated."""} , )
    UpperCAmelCase_ : Optional[bool] = field(
        default=UpperCamelCase_ , metadata={"""help""": """Sample from the language model's output distribution."""} )
    UpperCAmelCase_ : Optional[float] = field(default=0.2 , metadata={"""help""": """Sampling temperature used for generation."""} )
    UpperCAmelCase_ : Optional[int] = field(default=256 , metadata={"""help""": """Maximum number of newly generated tokens."""} )
    UpperCAmelCase_ : Optional[int] = field(default=0 , metadata={"""help""": """Top-k parameter used for generation."""} )
    UpperCAmelCase_ : Optional[float] = field(default=0.95 , metadata={"""help""": """Top-p parameter used for nucleus sampling."""} )
    UpperCAmelCase_ : Optional[int] = field(default=10 , metadata={"""help""": """Number of generations to run in parallel."""} )
    UpperCAmelCase_ : Optional[int] = field(
        default=200 , metadata={"""help""": """Number of completions to generate for each sample."""} )
    UpperCAmelCase_ : Optional[int] = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
    UpperCAmelCase_ : Optional[str] = field(
        default="""eval_results.json""" , metadata={"""help""": """File to save the evaluation results."""} )
    UpperCAmelCase_ : Optional[str] = field(
        default="""0""" , metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""} )
    UpperCAmelCase_ : Optional[int] = field(
        default=-1 ,
        metadata={
            """help""": (
                """Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"""
                """ number corresponds to which GPU device id to run on."""
            )
        } , )

@dataclass
class lowercase_ :
    """simple docstring"""

    UpperCAmelCase_ : Optional[int] = field(
        default=UpperCamelCase_ ,
        metadata={
            """help""": """The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."""
        } , )
    UpperCAmelCase_ : Optional[str] = field(
        default="""transformersbook/codeparrot""" , metadata={"""help""": """Folder or name of dataset to process."""} )
    UpperCAmelCase_ : Optional[str] = field(
        default="""codeparrot-clean""" , metadata={"""help""": """Folder to save processed dataset."""} )
    UpperCAmelCase_ : Optional[int] = field(
        default=100000 , metadata={"""help""": """Number of files to save per JSON output file."""} )
    UpperCAmelCase_ : Optional[str] = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
    UpperCAmelCase_ : Optional[float] = field(
        default=1000 , metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""} )
    UpperCAmelCase_ : Optional[float] = field(
        default=100 , metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""} )
    UpperCAmelCase_ : Optional[float] = field(
        default=0.25 , metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""} )
    UpperCAmelCase_ : Optional[float] = field(
        default=1.5 , metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""} )
    UpperCAmelCase_ : Optional[float] = field(
        default=0.7 , metadata={"""help""": """Probability for filtering config, test and uncommon files."""} )
    UpperCAmelCase_ : Optional[str] = field(
        default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} , )
    UpperCAmelCase_ : Optional[bool] = field(
        default=UpperCamelCase_ , metadata={"""help""": """If True, near-duplicate samples are removed."""} )
    UpperCAmelCase_ : Optional[float] = field(
        default=0.85 , metadata={"""help""": """Jaccard threshold for near-duplicate samples."""} )

@dataclass
class lowercase_ :
    """simple docstring"""

    UpperCAmelCase_ : Optional[str] = field(
        default="""gpt2""" , metadata={"""help""": """Base tokenizer to build new tokenizer from."""} )
    UpperCAmelCase_ : Optional[str] = field(
        default="""transformersbook/codeparrot-train""" , metadata={"""help""": """Dataset to train tokenizer on."""} )
    UpperCAmelCase_ : Optional[str] = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
    UpperCAmelCase_ : Optional[int] = field(default=200000 , metadata={"""help""": """Number of examples to train tokenizer on."""} )
    UpperCAmelCase_ : Optional[int] = field(
        default=32768 , metadata={"""help""": """Number of examples to train the tokenizer on."""} )
    UpperCAmelCase_ : Optional[str] = field(default="""codeparrot""" , metadata={"""help""": """Name of new tokenizer."""} )
    UpperCAmelCase_ : Optional[bool] = field(default=UpperCamelCase_ , metadata={"""help""": """Push saved tokenizer to the hub."""} )

@dataclass
class lowercase_ :
    """simple docstring"""

    UpperCAmelCase_ : Optional[str] = field(
        default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} )
    UpperCAmelCase_ : Optional[str] = field(
        default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path to the dataset to pretokenize."""} )
    UpperCAmelCase_ : Optional[str] = field(
        default="""tokenized-codeparrot-train""" , metadata={"""help""": """Repo name of the pretokenized data."""} )
    UpperCAmelCase_ : Optional[int] = field(default=UpperCamelCase_ , metadata={"""help""": """Number of workers used for code evaluation."""} )

@dataclass
class lowercase_ :
    """simple docstring"""

    UpperCAmelCase_ : Optional[str] = field(
        default="""gpt2-large""" , metadata={"""help""": """Configuration to use for model initialization."""} )
    UpperCAmelCase_ : Optional[str] = field(
        default="""codeparrot/codeparrot""" , metadata={"""help""": """Tokenizer attached to model."""} )
    UpperCAmelCase_ : Optional[str] = field(default="""codeparrot""" , metadata={"""help""": """Name of the created model."""} )
    UpperCAmelCase_ : Optional[bool] = field(default=UpperCamelCase_ , metadata={"""help""": """Push saved tokenizer to the hub."""} )
338
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases

def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> Union[str, Any]:
    assert isinstance(snake_case__ , snake_case__ )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype

@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Union[str, Any]:
    lowerCAmelCase = tmp_path / '''cache'''
    lowerCAmelCase = {'''text''': '''string'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
    _check_text_dataset(snake_case__ , snake_case__ )

@pytest.mark.parametrize(
    '''features''' ,
    [
        None,
        {'''text''': '''string'''},
        {'''text''': '''int32'''},
        {'''text''': '''float32'''},
    ] , )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]:
    lowerCAmelCase = tmp_path / '''cache'''
    lowerCAmelCase = {'''text''': '''string'''}
    lowerCAmelCase = features.copy() if features else default_expected_features
    lowerCAmelCase = (
        Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
    )
    lowerCAmelCase = TextDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
    _check_text_dataset(snake_case__ , snake_case__ )

@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> List[str]:
    lowerCAmelCase = tmp_path / '''cache'''
    lowerCAmelCase = {'''text''': '''string'''}
    lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
    _check_text_dataset(snake_case__ , snake_case__ )
    assert dataset.split == split if split else "train"

@pytest.mark.parametrize('''path_type''' , [str, list] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
    if issubclass(snake_case__ , snake_case__ ):
        lowerCAmelCase = text_path
    elif issubclass(snake_case__ , snake_case__ ):
        lowerCAmelCase = [text_path]
    lowerCAmelCase = tmp_path / '''cache'''
    lowerCAmelCase = {'''text''': '''string'''}
    lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
    _check_text_dataset(snake_case__ , snake_case__ )

def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__=("train",) ) -> Optional[Any]:
    assert isinstance(snake_case__ , snake_case__ )
    for split in splits:
        lowerCAmelCase = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype

@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]:
    lowerCAmelCase = tmp_path / '''cache'''
    lowerCAmelCase = {'''text''': '''string'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        lowerCAmelCase = TextDatasetReader({'''train''': text_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
    _check_text_datasetdict(snake_case__ , snake_case__ )

@pytest.mark.parametrize(
    '''features''' ,
    [
        None,
        {'''text''': '''string'''},
        {'''text''': '''int32'''},
        {'''text''': '''float32'''},
    ] , )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> List[Any]:
    lowerCAmelCase = tmp_path / '''cache'''
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    lowerCAmelCase = {'''text''': '''string'''}
    lowerCAmelCase = features.copy() if features else default_expected_features
    lowerCAmelCase = (
        Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
    )
    lowerCAmelCase = TextDatasetReader({'''train''': text_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
    _check_text_datasetdict(snake_case__ , snake_case__ )

@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Any:
    if split:
        lowerCAmelCase = {split: text_path}
    else:
        lowerCAmelCase = '''train'''
        lowerCAmelCase = {'''train''': text_path, '''test''': text_path}
    lowerCAmelCase = tmp_path / '''cache'''
    lowerCAmelCase = {'''text''': '''string'''}
    lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
    _check_text_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
338
1
import argparse
import json
import os
import re
import shutil

import torch

from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging

logging.set_verbosity_warning()

lowercase__ : List[Any] = 2

class lowercase_ :
    """simple docstring"""

    def __init__( self , *, # begin keyword-only arguments
        __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE=None , ) ->int:
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = bos, unk, pad, eos
        lowerCAmelCase = []
        lowerCAmelCase = []
        lowerCAmelCase = {}
        lowerCAmelCase = self.add_symbol(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = self.add_symbol(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = self.add_symbol(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = self.add_symbol(__SCREAMING_SNAKE_CASE )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = len(self.symbols )

    def __eq__( self , __SCREAMING_SNAKE_CASE ) ->List[Any]:
        return self.indices == other.indices

    def __getitem__( self , __SCREAMING_SNAKE_CASE ) ->List[Any]:
        if idx < len(self.symbols ):
            return self.symbols[idx]
        return self.unk_word

    def __len__( self ) ->str:
        return len(self.symbols )

    def __contains__( self , __SCREAMING_SNAKE_CASE ) ->str:
        return sym in self.indices

    @classmethod
    def SCREAMING_SNAKE_CASE_ ( cls , __SCREAMING_SNAKE_CASE ) ->str:
        lowerCAmelCase = cls()
        d.add_from_file(__SCREAMING_SNAKE_CASE )
        return d

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=False ) ->Optional[Any]:
        if word in self.indices and not overwrite:
            lowerCAmelCase = self.indices[word]
            lowerCAmelCase = self.count[idx] + n
            return idx
        else:
            lowerCAmelCase = len(self.symbols )
            lowerCAmelCase = idx
            self.symbols.append(__SCREAMING_SNAKE_CASE )
            self.count.append(__SCREAMING_SNAKE_CASE )
            return idx

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Dict:
        return 0

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Tuple:
        if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
            try:
                with open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as fd:
                    self.add_from_file(__SCREAMING_SNAKE_CASE )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(__SCREAMING_SNAKE_CASE ) )
            return

        lowerCAmelCase = f.readlines()
        lowerCAmelCase = self._load_meta(__SCREAMING_SNAKE_CASE )

        for line in lines[indices_start_line:]:
            try:
                lowerCAmelCase , lowerCAmelCase = line.rstrip().rsplit(''' ''' , 1 )
                if field == "#fairseq:overwrite":
                    lowerCAmelCase = True
                    lowerCAmelCase , lowerCAmelCase = line.rsplit(''' ''' , 1 )
                else:
                    lowerCAmelCase = False
                lowerCAmelCase = int(__SCREAMING_SNAKE_CASE )
                lowerCAmelCase = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        '''Duplicate word found when loading Dictionary: \'{}\'. '''
                        '''Duplicate words can overwrite earlier ones by adding the '''
                        '''#fairseq:overwrite flag at the end of the corresponding row '''
                        '''in the dictionary file. If using the Camembert model, please '''
                        '''download an updated copy of the model file.'''.format(__SCREAMING_SNAKE_CASE ) )
                self.add_symbol(__SCREAMING_SNAKE_CASE , n=__SCREAMING_SNAKE_CASE , overwrite=__SCREAMING_SNAKE_CASE )
            except ValueError:
                raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''' )

def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> int:
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    lowerCAmelCase = dict((re.sub(R'''@@$''' , '''''' , snake_case__ ), v) if k.endswith('''@@''' ) else (re.sub(R'''$''' , '''</w>''' , snake_case__ ), v) for k, v in d.items() )
    lowerCAmelCase = '''<s> <pad> </s> <unk>'''.split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"{k}</w>"]
        lowerCAmelCase = d[k]  # restore
    return da

def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> Optional[Any]:
    # prep
    if not os.path.exists(snake_case__ ):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!" )
    os.makedirs(snake_case__ , exist_ok=snake_case__ )
    print(f"Writing results to {pytorch_dump_folder_path}" )

    # handle various types of models
    lowerCAmelCase = os.path.join(snake_case__ , '''checkpoint.pt''' )
    if not os.path.isfile(snake_case__ ):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!" )
    lowerCAmelCase = torch.load(snake_case__ , map_location='''cpu''' )

    lowerCAmelCase = chkpt['''cfg''']['''model''']

    # dicts
    lowerCAmelCase = os.path.join(snake_case__ , '''dict.txt''' )
    if not os.path.isfile(snake_case__ ):
        raise ValueError(f"path to the file {dict_file} does not exist!" )
    lowerCAmelCase = Dictionary.load(snake_case__ )
    lowerCAmelCase = rewrite_dict_keys(src_dict.indices )
    lowerCAmelCase = len(snake_case__ )
    lowerCAmelCase = os.path.join(snake_case__ , VOCAB_FILES_NAMES['''vocab_file'''] )
    print(f"Generating {src_vocab_file} of {src_vocab_size} records" )
    with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(snake_case__ , ensure_ascii=snake_case__ , indent=snake_case__ ) )

    # merges_file (bpecodes)
    lowerCAmelCase = os.path.join(snake_case__ , '''bpecodes''' )
    if not os.path.isfile(snake_case__ ):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!" )

    lowerCAmelCase = os.path.join(snake_case__ , VOCAB_FILES_NAMES['''merges_file'''] )
    shutil.copyfile(snake_case__ , snake_case__ )

    # model config
    lowerCAmelCase = os.path.join(snake_case__ , '''config.json''' )
    lowerCAmelCase = {
        '''activation_dropout''': args['''activation_dropout'''],
        '''architectures''': ['''BioGptForCausalLM'''],
        '''attention_probs_dropout_prob''': args['''attention_dropout'''],
        '''bos_token_id''': 0,
        '''eos_token_id''': 2,
        '''hidden_act''': args['''activation_fn'''],
        '''hidden_dropout_prob''': args['''dropout'''],
        '''hidden_size''': args['''decoder_embed_dim'''],
        '''initializer_range''': 0.02,
        '''intermediate_size''': args['''decoder_ffn_embed_dim'''],
        '''layer_norm_eps''': 1E-12,
        '''layerdrop''': args['''decoder_layerdrop'''],
        '''max_position_embeddings''': args['''max_target_positions'''],
        '''model_type''': '''biogpt''',
        '''num_attention_heads''': args['''decoder_attention_heads'''],
        '''num_hidden_layers''': args['''decoder_layers'''],
        '''pad_token_id''': 1,
        '''scale_embedding''': not args['''no_scale_embedding'''],
        '''tie_word_embeddings''': args['''share_decoder_input_output_embed'''],
        '''vocab_size''': src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}" )
    with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(snake_case__ , ensure_ascii=snake_case__ , indent=snake_case__ ) )

    # tokenizer config
    lowerCAmelCase = os.path.join(snake_case__ , snake_case__ )
    lowerCAmelCase = {
        '''bos_token''': '''<s>''',
        '''eos_token''': '''</s>''',
        '''model_max_length''': 1_0_2_4,
        '''pad_token''': '''<pad>''',
        '''special_tokens_map_file''': None,
        '''tokenizer_class''': '''BioGptTokenizer''',
        '''unk_token''': '''<unk>''',
    }

    print(f"Generating {biogpt_tokenizer_config_file}" )
    with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(snake_case__ , ensure_ascii=snake_case__ , indent=snake_case__ ) )

    # model
    lowerCAmelCase = chkpt['''model''']

    # remove unneeded keys
    lowerCAmelCase = [
        '''decoder.version''',
    ]
    for k in ignore_keys:
        model_state_dict.pop(snake_case__ , snake_case__ )

    lowerCAmelCase = list(model_state_dict.keys() )
    for layer_name in layer_names:
        if layer_name.endswith('''output_projection.weight''' ):
            lowerCAmelCase = model_state_dict.pop(snake_case__ )
        else:
            lowerCAmelCase = model_state_dict.pop(snake_case__ )

    lowerCAmelCase = BioGptConfig.from_pretrained(snake_case__ )
    lowerCAmelCase = BioGptForCausalLM(snake_case__ )

    # check that it loads ok
    model_new.load_state_dict(snake_case__ )

    # save
    lowerCAmelCase = os.path.join(snake_case__ , snake_case__ )
    print(f"Generating {pytorch_weights_dump_path}" )
    torch.save(snake_case__ , snake_case__ )

    print('''Conversion is done!''' )

if __name__ == "__main__":
    lowercase__ : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--biogpt_checkpoint_path''',
        default=None,
        type=str,
        required=True,
        help=(
            '''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
            ''' bpecodes, etc.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    lowercase__ : Union[str, Any] = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
338
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str:
    # Type checks: isinstance() needs a concrete class as its second argument
    # (the original body passed the value itself, which always fails at runtime).
    if isinstance(snake_case__ , float ):
        raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
    if isinstance(snake_case__ , str ):
        raise TypeError('''\'str\' object cannot be interpreted as an integer''' )

    if num == 0:
        return "0b0"

    lowerCAmelCase = False

    if num < 0:
        lowerCAmelCase = True
        lowerCAmelCase = -num

    lowerCAmelCase = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1

    if negative:
        return "-0b" + "".join(str(snake_case__ ) for e in binary )

    return "0b" + "".join(str(snake_case__ ) for e in binary )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
338
1
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel

@require_tf
class lowercase_ :
    """simple docstring"""

    UpperCAmelCase_ : List[str] = MBartConfig
    UpperCAmelCase_ : Any = {}
    UpperCAmelCase_ : int = """gelu"""

    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=20 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , ) ->Tuple:
        lowerCAmelCase = parent
        lowerCAmelCase = batch_size
        lowerCAmelCase = seq_length
        lowerCAmelCase = is_training
        lowerCAmelCase = use_labels
        lowerCAmelCase = vocab_size
        lowerCAmelCase = hidden_size
        lowerCAmelCase = num_hidden_layers
        lowerCAmelCase = num_attention_heads
        lowerCAmelCase = intermediate_size
        lowerCAmelCase = hidden_dropout_prob
        lowerCAmelCase = attention_probs_dropout_prob
        lowerCAmelCase = max_position_embeddings
        lowerCAmelCase = eos_token_id
        lowerCAmelCase = pad_token_id
        lowerCAmelCase = bos_token_id

    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        lowerCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        lowerCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )

        lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        lowerCAmelCase = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        lowerCAmelCase = prepare_mbart_inputs_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        return config, inputs_dict

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Dict:
        lowerCAmelCase = TFMBartModel(config=__SCREAMING_SNAKE_CASE ).get_decoder()
        lowerCAmelCase = inputs_dict['''input_ids''']

        lowerCAmelCase = input_ids[:1, :]
        lowerCAmelCase = inputs_dict['''attention_mask'''][:1, :]
        lowerCAmelCase = inputs_dict['''head_mask''']
        lowerCAmelCase = 1

        # first forward pass
        lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )

        lowerCAmelCase , lowerCAmelCase = outputs.to_tuple()
        lowerCAmelCase = past_key_values[1]

def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , ) -> List[str]:
    if attention_mask is None:
        lowerCAmelCase = tf.cast(tf.math.not_equal(snake_case__ , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        lowerCAmelCase = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        lowerCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        lowerCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        lowerCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }

@require_tf
class lowercase_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    """simple docstring"""

    UpperCAmelCase_ : str = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    UpperCAmelCase_ : List[Any] = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    UpperCAmelCase_ : Union[str, Any] = (
        {
            """conversational""": TFMBartForConditionalGeneration,
            """feature-extraction""": TFMBartModel,
            """summarization""": TFMBartForConditionalGeneration,
            """text2text-generation""": TFMBartForConditionalGeneration,
            """translation""": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    UpperCAmelCase_ : int = True
    UpperCAmelCase_ : str = False
    UpperCAmelCase_ : str = False

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->str:
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        lowerCAmelCase = TFMBartModelTester(self )
        lowerCAmelCase = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*__SCREAMING_SNAKE_CASE )

@require_sentencepiece
@require_tokenizers
@require_tf
class lowercase_ ( unittest.TestCase ):
    """simple docstring"""

    UpperCAmelCase_ : List[Any] = [
        """ UN Chief Says There Is No Military Solution in Syria""",
    ]
    UpperCAmelCase_ : Any = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
    ]
    UpperCAmelCase_ : str = """facebook/mbart-large-en-ro"""

    @cached_property
    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->int:
        lowerCAmelCase = self.translate_src_text(**__SCREAMING_SNAKE_CASE )
        self.assertListEqual(self.expected_text , __SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->Dict:
        lowerCAmelCase = self.tokenizer(self.src_text , **__SCREAMING_SNAKE_CASE , return_tensors='''tf''' )
        lowerCAmelCase = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        lowerCAmelCase = self.tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
        return generated_words

    @slow
    def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
        self._assert_generated_batch_equal_expected()
338
class lowercase_ :
    """simple docstring"""

    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Any:
        lowerCAmelCase = name
        lowerCAmelCase = value
        lowerCAmelCase = weight

    def __repr__( self ) ->str:
        return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        return self.value

    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        return self.name

    def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
        return self.weight

    def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
        return self.value / self.weight

def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> int:
    lowerCAmelCase = []
    for i in range(len(snake_case__ ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu

def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
    lowerCAmelCase = sorted(snake_case__ , key=snake_case__ , reverse=snake_case__ )
    lowerCAmelCase = []
    lowerCAmelCase , lowerCAmelCase = 0.0, 0.0
    for i in range(len(snake_case__ ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)

def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
    pass

if __name__ == "__main__":
    import doctest

    doctest.testmod()
338
1
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str:
    if not all(char in '''01''' for char in bin_string ):
        raise ValueError('''Non-binary value was passed to the function''' )
    if not bin_string:
        raise ValueError('''Empty string was passed to the function''' )
    lowerCAmelCase = ''''''
    while len(snake_case__ ) % 3 != 0:
        lowerCAmelCase = '''0''' + bin_string
    lowerCAmelCase = [
        bin_string[index : index + 3]
        for index in range(len(snake_case__ ) )
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        lowerCAmelCase = 0
        for index, val in enumerate(snake_case__ ):
            oct_val += int(2 ** (2 - index) * int(snake_case__ ) )
        oct_string += str(snake_case__ )
    return oct_string

if __name__ == "__main__":
    from doctest import testmod

    testmod()
338
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    lowercase__ : Dict = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    lowercase__ : Optional[int] = [0, 2_5, 5_0]
    lowercase__ : Union[str, Any] = [2_5, 5_0, 7_5]
    lowercase__ : int = fuzz.membership.trimf(X, abca)
    lowercase__ : Tuple = fuzz.membership.trimf(X, abca)

    # Compute the different operations using inbuilt functions.
    lowercase__ : List[str] = np.ones(7_5)
    lowercase__ : Any = np.zeros((7_5,))
    # 1. Union = max(µA(x), µB(x))
    lowercase__ : Union[str, Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    lowercase__ : int = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    lowercase__ : Union[str, Any] = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    lowercase__ : Optional[int] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    lowercase__ : Any = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    lowercase__ : str = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    lowercase__ : Tuple = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    lowercase__ : Tuple = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title('''Young''')
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title('''Middle aged''')
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title('''union''')
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title('''intersection''')
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title('''complement_a''')
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title('''difference a/b''')
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title('''alg_sum''')
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title('''alg_product''')
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title('''bdd_sum''')
    plt.grid(True)

    plt.subplot(4, 3, 1_0)
    plt.plot(X, bdd_difference)
    plt.title('''bdd_difference''')
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
338
1
from __future__ import annotations

import time

lowercase__ : int = list[tuple[int, int]]

lowercase__ : Optional[int] = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

lowercase__ : Union[str, Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

class lowercase_ :
    """simple docstring"""

    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Tuple:
        lowerCAmelCase = pos_x
        lowerCAmelCase = pos_y
        lowerCAmelCase = (pos_y, pos_x)
        lowerCAmelCase = goal_x
        lowerCAmelCase = goal_y
        lowerCAmelCase = parent

class lowercase_ :
    """simple docstring"""

    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Optional[int]:
        lowerCAmelCase = Node(start[1] , start[0] , goal[1] , goal[0] , __SCREAMING_SNAKE_CASE )
        lowerCAmelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , __SCREAMING_SNAKE_CASE )

        lowerCAmelCase = [self.start]
        lowerCAmelCase = False

    def SCREAMING_SNAKE_CASE_ ( self ) ->Path | None:
        while self.node_queue:
            lowerCAmelCase = self.node_queue.pop(0 )

            if current_node.pos == self.target.pos:
                lowerCAmelCase = True
                return self.retrace_path(__SCREAMING_SNAKE_CASE )

            lowerCAmelCase = self.get_successors(__SCREAMING_SNAKE_CASE )

            for node in successors:
                self.node_queue.append(__SCREAMING_SNAKE_CASE )

        if not self.reached:
            return [self.start.pos]
        return None

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->list[Node]:
        lowerCAmelCase = []
        for action in delta:
            lowerCAmelCase = parent.pos_x + action[1]
            lowerCAmelCase = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__SCREAMING_SNAKE_CASE ) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , __SCREAMING_SNAKE_CASE ) )
        return successors

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Path:
        lowerCAmelCase = node
        lowerCAmelCase = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            lowerCAmelCase = current_node.parent
        path.reverse()
        return path

class lowercase_ :
    """simple docstring"""

    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Optional[Any]:
        lowerCAmelCase = BreadthFirstSearch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        lowerCAmelCase = BreadthFirstSearch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        lowerCAmelCase = False

    def SCREAMING_SNAKE_CASE_ ( self ) ->Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            lowerCAmelCase = self.fwd_bfs.node_queue.pop(0 )
            lowerCAmelCase = self.bwd_bfs.node_queue.pop(0 )

            if current_bwd_node.pos == current_fwd_node.pos:
                lowerCAmelCase = True
                return self.retrace_bidirectional_path(
                    __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

            lowerCAmelCase = current_bwd_node
            lowerCAmelCase = current_fwd_node

            lowerCAmelCase = {
                self.fwd_bfs: self.fwd_bfs.get_successors(__SCREAMING_SNAKE_CASE ),
                self.bwd_bfs: self.bwd_bfs.get_successors(__SCREAMING_SNAKE_CASE ),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(__SCREAMING_SNAKE_CASE )

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Path:
        lowerCAmelCase = self.fwd_bfs.retrace_path(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = self.bwd_bfs.retrace_path(__SCREAMING_SNAKE_CASE )
        bwd_path.pop()
        bwd_path.reverse()
        lowerCAmelCase = fwd_path + bwd_path
        return path

if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    lowercase__ : Dict = (0, 0)
    lowercase__ : Optional[int] = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    lowercase__ : Dict = time.time()
    lowercase__ : Optional[Any] = BreadthFirstSearch(init, goal)
    lowercase__ : Dict = bfs.search()
    lowercase__ : Tuple = time.time() - start_bfs_time
    print('''Unidirectional BFS computation time : ''', bfs_time)

    lowercase__ : Optional[int] = time.time()
    lowercase__ : List[Any] = BidirectionalBreadthFirstSearch(init, goal)
    lowercase__ : Optional[Any] = bd_bfs.search()
    lowercase__ : Union[str, Any] = time.time() - start_bd_bfs_time
    print('''Bidirectional BFS computation time : ''', bd_bfs_time)
338
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest

class lowercase_ ( UpperCamelCase_ ):
    """simple docstring"""

    UpperCAmelCase_ : str = (DDPMScheduler,)

    def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->Optional[Any]:
        lowerCAmelCase = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0_0_0_1,
            '''beta_end''': 0.0_2,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }

        config.update(**__SCREAMING_SNAKE_CASE )
        return config

    def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
            self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
        self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )

        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5

    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = self.dummy_model()
        lowerCAmelCase = self.dummy_sample_deter
        lowerCAmelCase = torch.manual_seed(0 )

        for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
            # 1. predict noise residual
            lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

            # 2. predict previous mean of sample x_t-1
            lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            lowerCAmelCase = pred_prev_sample

        lowerCAmelCase = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
        lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )

        assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
        assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3

    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = self.dummy_model()
        lowerCAmelCase = self.dummy_sample_deter
        lowerCAmelCase = torch.manual_seed(0 )

        for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
            # 1. predict noise residual
            lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

            # 2. predict previous mean of sample x_t-1
            lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            lowerCAmelCase = pred_prev_sample

        lowerCAmelCase = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
        lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )

        assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
        assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3

    def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = scheduler.timesteps

        for i, timestep in enumerate(__SCREAMING_SNAKE_CASE ):
            if i == len(__SCREAMING_SNAKE_CASE ) - 1:
                lowerCAmelCase = -1
            else:
                lowerCAmelCase = timesteps[i + 1]

            lowerCAmelCase = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE )
            lowerCAmelCase = prev_t.item()

            self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = [100, 87, 50, 51, 0]

        with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''`custom_timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = [100, 87, 50, 1, 0]
        lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )

        with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )

        lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises( __SCREAMING_SNAKE_CASE , msg=F"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ): scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
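For context on the custom-timesteps contract the last three tests pin down, a minimal usage sketch (assumes `diffusers` is installed; the timestep values mirror the tests above):

from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # ok: strictly descending
print(scheduler.timesteps)  # roughly: tensor([100, 87, 50, 1, 0])
# scheduler.set_timesteps(timesteps=[100, 87, 50, 51, 0])                        # raises: not descending
# scheduler.set_timesteps(num_inference_steps=5, timesteps=[100, 87, 50, 1, 0])  # raises: mutually exclusive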
338
1
import logging import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEncoder, BertModel, BertPreTrainedModel, ) lowercase__ : str = logging.getLogger(__name__) class lowercase_ ( UpperCamelCase_ ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]: lowerCAmelCase = self.layer[current_layer](__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , head_mask[current_layer] ) lowerCAmelCase = layer_outputs[0] return hidden_states @add_start_docstrings( """The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , UpperCamelCase_ , ) class lowercase_ ( UpperCamelCase_ ): """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE ) ->List[Any]: super().__init__(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = BertEncoderWithPabee(__SCREAMING_SNAKE_CASE ) self.init_weights() lowerCAmelCase = 0 lowerCAmelCase = 0 lowerCAmelCase = 0 lowerCAmelCase = 0 def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Dict: lowerCAmelCase = threshold def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Union[str, Any]: lowerCAmelCase = patience def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: lowerCAmelCase = 0 lowerCAmelCase = 0 def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: lowerCAmelCase = self.inference_layers_num / self.inference_instances_num lowerCAmelCase = ( F"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up =" F" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***" ) print(__SCREAMING_SNAKE_CASE ) @add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , ) ->Tuple: if input_ids is not None and inputs_embeds is not None: raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' ) elif input_ids is not None: lowerCAmelCase = input_ids.size() elif inputs_embeds is not None: lowerCAmelCase = inputs_embeds.size()[:-1] else: raise ValueError('''You have to specify either input_ids or inputs_embeds''' ) lowerCAmelCase = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: lowerCAmelCase = torch.ones(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE ) if token_type_ids is None: lowerCAmelCase = torch.zeros(__SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
lowerCAmelCase = self.get_extended_attention_mask(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = encoder_hidden_states.size() lowerCAmelCase = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: lowerCAmelCase = torch.ones(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.invert_attention_mask(__SCREAMING_SNAKE_CASE ) else: lowerCAmelCase = None # Prepare head mask if needed # 1.0 in head_mask indicates we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] lowerCAmelCase = self.get_head_mask(__SCREAMING_SNAKE_CASE , self.config.num_hidden_layers ) lowerCAmelCase = self.embeddings( input_ids=__SCREAMING_SNAKE_CASE , position_ids=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , inputs_embeds=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = embedding_output if self.training: lowerCAmelCase = [] for i in range(self.config.num_hidden_layers ): lowerCAmelCase = self.encoder.adaptive_forward( __SCREAMING_SNAKE_CASE , current_layer=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.pooler(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = output_layers[i](output_dropout(__SCREAMING_SNAKE_CASE ) ) res.append(__SCREAMING_SNAKE_CASE ) elif self.patience == 0: # Use all layers for inference lowerCAmelCase = self.encoder( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , ) lowerCAmelCase = self.pooler(encoder_outputs[0] ) lowerCAmelCase = [output_layers[self.config.num_hidden_layers - 1](__SCREAMING_SNAKE_CASE )] else: lowerCAmelCase = 0 lowerCAmelCase = None lowerCAmelCase = 0 for i in range(self.config.num_hidden_layers ): calculated_layer_num += 1 lowerCAmelCase = self.encoder.adaptive_forward( __SCREAMING_SNAKE_CASE , current_layer=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.pooler(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = output_layers[i](__SCREAMING_SNAKE_CASE ) if regression: lowerCAmelCase = logits.detach() if patient_result is not None: lowerCAmelCase = patient_result.detach() if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold: patient_counter += 1 else: lowerCAmelCase = 0 else: lowerCAmelCase = logits.detach().argmax(dim=1 ) if patient_result is not None: lowerCAmelCase = patient_result.detach().argmax(dim=1 ) if (patient_result is not None) and torch.all(labels.eq(__SCREAMING_SNAKE_CASE ) ): patient_counter += 1 else: lowerCAmelCase = 0 lowerCAmelCase = logits if patient_counter == self.patience: break lowerCAmelCase = [patient_result] self.inference_layers_num += calculated_layer_num self.inference_instances_num += 1 return res @add_start_docstrings( """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""" , UpperCamelCase_ , ) class lowercase_ ( UpperCamelCase_ ): """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE ) ->Union[str, Any]: super().__init__(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = config.num_labels lowerCAmelCase = BertModelWithPabee(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = nn.Dropout(config.hidden_dropout_prob ) lowerCAmelCase = nn.ModuleList( [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] ) self.init_weights() @add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , ) ->Union[str, Any]: lowerCAmelCase = self.bert( input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , position_ids=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , inputs_embeds=__SCREAMING_SNAKE_CASE , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , ) lowerCAmelCase = (logits[-1],) if labels is not None: lowerCAmelCase = None lowerCAmelCase = 0 for ix, logits_item in enumerate(__SCREAMING_SNAKE_CASE ): if self.num_labels == 1: # We are doing regression lowerCAmelCase = MSELoss() lowerCAmelCase = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) ) else: lowerCAmelCase = CrossEntropyLoss() lowerCAmelCase = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) ) if total_loss is None: lowerCAmelCase = loss else: total_loss += loss * (ix + 1) total_weights += ix + 1 lowerCAmelCase = (total_loss / total_weights,) + outputs return outputs
338
import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer lowercase__ : str = logging.get_logger(__name__) class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : Any = """AutoTokenizer""" UpperCAmelCase_ : Optional[int] = ["""tokenizer"""] UpperCAmelCase_ : str = { """semantic_prompt""": 1, """coarse_prompt""": 2, """fine_prompt""": 2, } def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]: super().__init__(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = speaker_embeddings @classmethod def SCREAMING_SNAKE_CASE_ ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , **__SCREAMING_SNAKE_CASE ) ->Tuple: if speaker_embeddings_dict_path is not None: lowerCAmelCase = get_file_from_repo( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ) , revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ) , ) if speaker_embeddings_path is None: logger.warning( F"`{os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}` does not exist\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
) lowerCAmelCase = None else: with open(__SCREAMING_SNAKE_CASE ) as speaker_embeddings_json: lowerCAmelCase = json.load(__SCREAMING_SNAKE_CASE ) else: lowerCAmelCase = None lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) return cls(tokenizer=__SCREAMING_SNAKE_CASE , speaker_embeddings=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , __SCREAMING_SNAKE_CASE="speaker_embeddings" , __SCREAMING_SNAKE_CASE = False , **__SCREAMING_SNAKE_CASE , ) ->int: if self.speaker_embeddings is not None: os.makedirs(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''v2''' ) , exist_ok=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = {} lowerCAmelCase = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict['''repo_or_path'''] , __SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=__SCREAMING_SNAKE_CASE , ) lowerCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}.npy" ) lowerCAmelCase = tmp_dict with open(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , '''w''' ) as fp: json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) super().save_pretrained(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE ) ->List[str]: lowerCAmelCase = self.speaker_embeddings[voice_preset] lowerCAmelCase = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." ) lowerCAmelCase = get_file_from_repo( self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ) , revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ) , ) if path is None: raise ValueError( F"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exist\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." ) lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE ) return voice_preset_dict def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None ) ->Tuple: for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F"Voice preset unrecognized, missing {key} as a key." ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray."
) def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="pt" , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ) ->int: if voice_preset is not None and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): if ( isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE ) else: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not voice_preset.endswith('''.npz''' ): lowerCAmelCase = voice_preset + '''.npz''' lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE ) if voice_preset is not None: self._validate_voice_preset_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) lowerCAmelCase = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.tokenizer( __SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) if voice_preset is not None: lowerCAmelCase = voice_preset return encoded_text
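For orientation, the `preset_shape` mapping at the top of the class encodes the expected array rank of each prompt, and `_validate_voice_preset_dict` checks exactly that rank plus the ndarray type. A minimal preset that would pass validation (the sizes below are illustrative placeholders, not the real Bark dimensions):

import numpy as np

voice_preset = {
    "semantic_prompt": np.zeros(511, dtype=np.int64),     # rank 1
    "coarse_prompt": np.zeros((2, 300), dtype=np.int64),  # rank 2
    "fine_prompt": np.zeros((8, 300), dtype=np.int64),    # rank 2
}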
338
1
from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
338
import warnings from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401 warnings.warn( '''The `inpainting.py` script is outdated. Please use directly `from diffusers import''' ''' StableDiffusionInpaintPipeline` instead.''' )
338
1
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> List[str]: lowerCAmelCase = len(snake_case__ ) for i in range(length - 1 ): lowerCAmelCase = i for k in range(i + 1 , snake_case__ ): if collection[k] < collection[least]: lowerCAmelCase = k if least != i: lowerCAmelCase , lowerCAmelCase = (collection[i], collection[least]) return collection if __name__ == "__main__": lowercase__ : Optional[int] = input('''Enter numbers separated by a comma:\n''').strip() lowercase__ : str = [int(item) for item in user_input.split(''',''')] print(selection_sort(unsorted))
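Selection sort as written performs n(n-1)/2 comparisons regardless of input order (Θ(n²) time, Θ(1) extra space) but at most n-1 swaps, its one practical advantage over insertion sort when writes are expensive. A quick sanity check, assuming the function keeps the `selection_sort` name that the `__main__` block calls:

assert selection_sort([]) == []
assert selection_sort([3, 1, 2]) == [1, 2, 3]
assert selection_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]  # reversed input: still only n - 1 swaps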
338
import os import re import shutil import sys import tempfile import unittest import black lowercase__ : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated. lowercase__ : Dict = ''' def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states ''' class lowercase_ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: lowerCAmelCase = tempfile.mkdtemp() os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) ) lowerCAmelCase = self.transformer_dir shutil.copy( os.path.join(__SCREAMING_SNAKE_CASE , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: lowerCAmelCase = '''src/transformers''' shutil.rmtree(self.transformer_dir ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Union[str, Any]: lowerCAmelCase = comment + F"\nclass {class_name}(nn.Module):\n" + class_code if overwrite_result is not None: lowerCAmelCase = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result lowerCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) lowerCAmelCase = black.format_str(__SCREAMING_SNAKE_CASE , mode=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = os.path.join(self.transformer_dir , '''new_code.py''' ) with open(__SCREAMING_SNAKE_CASE , '''w''' , newline='''\n''' ) as f: f.write(__SCREAMING_SNAKE_CASE ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(__SCREAMING_SNAKE_CASE ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=__SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f: self.assertTrue(f.read() , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->int: lowerCAmelCase = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: # Base copy consistency self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , __SCREAMING_SNAKE_CASE , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , 
'''TestModel''' , __SCREAMING_SNAKE_CASE ) , ) # Copy consistency with a really long name lowerCAmelCase = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( F"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , F"{long_class_name}LMPredictionHead" , re.sub('''Bert''' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , __SCREAMING_SNAKE_CASE , overwrite_result=re.sub('''Bert''' , '''TestModel''' , __SCREAMING_SNAKE_CASE ) , ) def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple: lowerCAmelCase = check_copies.LOCALIZED_READMES['''README_zh-hans.md'''] lowerCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the''' ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for''' ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong''' ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.''' ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),''' ''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and''' ''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same''' ''' method has been applied to compress GPT2 into''' ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into''' ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),''' ''' Multilingual BERT into''' ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German''' ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**''' ''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders''' ''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang''' ''' Luong, Quoc V. Le, Christopher D. Manning.''' ) lowerCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) lowerCAmelCase = ( '''1. 
**[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.''' ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文''' ''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and''' ''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same''' ''' method has been applied to compress GPT2 into''' ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into''' ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),''' ''' Multilingual BERT into''' ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German''' ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自''' ''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather''' ''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,''' ''' Christopher D. Manning 发布。\n''' ) lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] ) self.assertFalse(__SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] ) # Check whether the number of models is equal to README.md after conversion. self.assertTrue(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the''' ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for''' ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong''' ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.''' ) lowerCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and''' ''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) lowerCAmelCase = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] ) # Check if the model link is synchronized. self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
338
1
import enum import os from hashlib import shaaaa from typing import Optional from .. import config from .logging import get_logger lowercase__ : Optional[Any] = get_logger(__name__) class lowercase_ ( enum.Enum ): """simple docstring""" UpperCAmelCase_ : Optional[int] = """all_checks""" UpperCAmelCase_ : List[str] = """basic_checks""" UpperCAmelCase_ : Union[str, Any] = """no_checks""" class lowercase_ ( UpperCamelCase_ ): """simple docstring""" class lowercase_ ( UpperCamelCase_ ): """simple docstring""" class lowercase_ ( UpperCamelCase_ ): """simple docstring""" class lowercase_ ( UpperCamelCase_ ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__=None ) -> Union[str, Any]: if expected_checksums is None: logger.info('''Unable to verify checksums.''' ) return if len(set(snake_case__ ) - set(snake_case__ ) ) > 0: raise ExpectedMoreDownloadedFiles(str(set(snake_case__ ) - set(snake_case__ ) ) ) if len(set(snake_case__ ) - set(snake_case__ ) ) > 0: raise UnexpectedDownloadedFile(str(set(snake_case__ ) - set(snake_case__ ) ) ) lowerCAmelCase = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]] lowerCAmelCase = ''' for ''' + verification_name if verification_name is not None else '''''' if len(snake_case__ ) > 0: raise NonMatchingChecksumError( f"Checksums didn't match{for_verification_name}:\n" f"{bad_urls}\n" '''Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error''' ) logger.info('''All the checksums matched successfully''' + for_verification_name ) class lowercase_ ( UpperCamelCase_ ): """simple docstring""" class lowercase_ ( UpperCamelCase_ ): """simple docstring""" class lowercase_ ( UpperCamelCase_ ): """simple docstring""" class lowercase_ ( UpperCamelCase_ ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> Tuple: if expected_splits is None: logger.info('''Unable to verify splits sizes.''' ) return if len(set(snake_case__ ) - set(snake_case__ ) ) > 0: raise ExpectedMoreSplits(str(set(snake_case__ ) - set(snake_case__ ) ) ) if len(set(snake_case__ ) - set(snake_case__ ) ) > 0: raise UnexpectedSplits(str(set(snake_case__ ) - set(snake_case__ ) ) ) lowerCAmelCase = [ {'''expected''': expected_splits[name], '''recorded''': recorded_splits[name]} for name in expected_splits if expected_splits[name].num_examples != recorded_splits[name].num_examples ] if len(snake_case__ ) > 0: raise NonMatchingSplitsSizesError(str(snake_case__ ) ) logger.info('''All the splits matched successfully.''' ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ = True ) -> dict: if record_checksum: lowerCAmelCase = shaaaa() with open(snake_case__ , '''rb''' ) as f: for chunk in iter(lambda: f.read(1 << 2_0 ) , B'''''' ): m.update(snake_case__ ) lowerCAmelCase = m.hexdigest() else: lowerCAmelCase = None return {"num_bytes": os.path.getsize(snake_case__ ), "checksum": checksum} def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Dict: if dataset_size and config.IN_MEMORY_MAX_SIZE: return dataset_size < config.IN_MEMORY_MAX_SIZE else: return False
338
import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( '''split_dict''' , [ SplitDict(), SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name='''my_dataset''' )} ), SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 )} ), SplitDict({'''train''': SplitInfo()} ), ] , ) def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]: lowerCAmelCase = split_dict._to_yaml_list() assert len(snake_case__ ) == len(snake_case__ ) lowerCAmelCase = SplitDict._from_yaml_list(snake_case__ ) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump lowerCAmelCase = None # the split name of split_dict takes over the name of the split info object lowerCAmelCase = split_name assert split_dict == reloaded @pytest.mark.parametrize( '''split_info''' , [SplitInfo(), SplitInfo(dataset_name=snake_case__ ), SplitInfo(dataset_name='''my_dataset''' )] ) def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[int]: # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name" # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files lowerCAmelCase = asdict(SplitDict({'''train''': split_info} ) ) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
338
1
import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowercase_ ( UpperCamelCase_ ): """simple docstring""" def __init__( self , *__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->Any: super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) lowerCAmelCase = eval_examples lowerCAmelCase = post_process_function def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "eval" ) ->int: lowerCAmelCase = self.eval_dataset if eval_dataset is None else eval_dataset lowerCAmelCase = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. lowerCAmelCase = self.compute_metrics lowerCAmelCase = None lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop lowerCAmelCase = time.time() try: lowerCAmelCase = eval_loop( __SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , ) finally: lowerCAmelCase = compute_metrics lowerCAmelCase = self.args.eval_batch_size * self.args.world_size if F"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"] output.metrics.update( speed_metrics( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default lowerCAmelCase = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions ) lowerCAmelCase = self.compute_metrics(__SCREAMING_SNAKE_CASE ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"{metric_key_prefix}_" ): lowerCAmelCase = metrics.pop(__SCREAMING_SNAKE_CASE ) metrics.update(output.metrics ) else: lowerCAmelCase = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(__SCREAMING_SNAKE_CASE ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) lowerCAmelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE ) return metrics def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "test" ) ->Any: lowerCAmelCase = self.get_test_dataloader(__SCREAMING_SNAKE_CASE ) # Temporarily disable metric computation, we will do it in the loop here. 
lowerCAmelCase = self.compute_metrics lowerCAmelCase = None lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop lowerCAmelCase = time.time() try: lowerCAmelCase = eval_loop( __SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , ) finally: lowerCAmelCase = compute_metrics lowerCAmelCase = self.args.eval_batch_size * self.args.world_size if F"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"] output.metrics.update( speed_metrics( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output lowerCAmelCase = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions , '''predict''' ) lowerCAmelCase = self.compute_metrics(__SCREAMING_SNAKE_CASE ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"{metric_key_prefix}_" ): lowerCAmelCase = metrics.pop(__SCREAMING_SNAKE_CASE ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE )
338
import unittest import numpy as np def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , ) -> np.ndarray: lowerCAmelCase = np.shape(snake_case__ ) lowerCAmelCase = np.shape(snake_case__ ) lowerCAmelCase = np.shape(snake_case__ ) if shape_a[0] != shape_b[0]: lowerCAmelCase = ( '''Expected the same number of rows for A and B. ''' f"Instead found A of size {shape_a} and B of size {shape_b}" ) raise ValueError(snake_case__ ) if shape_b[1] != shape_c[1]: lowerCAmelCase = ( '''Expected the same number of columns for B and C. ''' f"Instead found B of size {shape_b} and C of size {shape_c}" ) raise ValueError(snake_case__ ) lowerCAmelCase = pseudo_inv if a_inv is None: try: lowerCAmelCase = np.linalg.inv(snake_case__ ) except np.linalg.LinAlgError: raise ValueError( '''Input matrix A is not invertible. Cannot compute Schur complement.''' ) return mat_c - mat_b.T @ a_inv @ mat_b class lowercase_ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self ) ->None: lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] ) lowerCAmelCase = np.array([[2, 1], [6, 3]] ) lowerCAmelCase = schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = np.block([[a, b], [b.T, c]] ) lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE ) self.assertAlmostEqual(__SCREAMING_SNAKE_CASE , det_a * det_s ) def SCREAMING_SNAKE_CASE_ ( self ) ->None: lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] ) lowerCAmelCase = np.array([[2, 1], [6, 3]] ) with self.assertRaises(__SCREAMING_SNAKE_CASE ): schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->None: lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] ) lowerCAmelCase = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(__SCREAMING_SNAKE_CASE ): schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
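The determinant test above relies on the block-matrix identity behind the Schur complement: for \(M = \begin{pmatrix} A & B \\ B^{\mathsf{T}} & C \end{pmatrix}\) with \(A\) invertible and \(S = C - B^{\mathsf{T}} A^{-1} B\),

\[ \det(M) = \det(A)\,\det(S), \]

which is exactly what the first test checks by assembling \(M\) with `np.block` and comparing `np.linalg.det` results.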
338
1
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> List[Any]: assert isinstance(snake_case__ , snake_case__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Any: lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> List[str]: lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase = features.copy() if features else default_expected_features lowerCAmelCase = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Union[str, Any]: lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Tuple: if issubclass(snake_case__ , snake_case__ ): lowerCAmelCase = parquet_path elif issubclass(snake_case__ , snake_case__ ): lowerCAmelCase = [parquet_path] lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__=("train",) ) -> Optional[int]: assert isinstance(snake_case__ , snake_case__ ) for split in splits: 
lowerCAmelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]: lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase = ParquetDatasetReader( {'''train''': parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]: lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase = features.copy() if features else default_expected_features lowerCAmelCase = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase = ParquetDatasetReader({'''train''': parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Any: if split: lowerCAmelCase = {split: parquet_path} else: lowerCAmelCase = '''train''' lowerCAmelCase = {'''train''': parquet_path, '''test''': parquet_path} lowerCAmelCase = tmp_path / '''cache''' lowerCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> Any: lowerCAmelCase = ParquetDatasetWriter(snake_case__ , tmp_path / '''foo.parquet''' ) assert writer.write() > 0 lowerCAmelCase = pq.ParquetFile(tmp_path / '''foo.parquet''' ) lowerCAmelCase = pf.read() assert dataset.data.table == output_table def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> str: lowerCAmelCase = str(shared_datadir / '''test_image_rgb.jpg''' ) lowerCAmelCase = {'''image''': [image_path]} lowerCAmelCase = Features({'''image''': Image()} ) lowerCAmelCase = Dataset.from_dict(snake_case__ , features=snake_case__ ) lowerCAmelCase = ParquetDatasetWriter(snake_case__ , tmp_path / '''foo.parquet''' ) assert writer.write() > 0 lowerCAmelCase = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) ) assert dataset.features == reloaded_dataset.features lowerCAmelCase = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=snake_case__ ).read() assert 
dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( '''feature, expected''' , [ (Features({'''foo''': Value('''int32''' )} ), None), (Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> Tuple: assert get_writer_batch_size(snake_case__ ) == expected
338
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration lowercase__ : Any = { '''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''', '''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''', '''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''', '''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''', '''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''', '''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''', '''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''', '''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''', '''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''', '''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''', } def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str: lowerCAmelCase = ['''layers''', '''blocks'''] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) lowercase__ : List[Any] = { '''blocks''': '''layers''', '''mlp.0''': '''fc1''', '''mlp.2''': '''fc2''', '''mlp_ln''': '''final_layer_norm''', '''.attn.query''': '''.self_attn.q_proj''', '''.attn.key''': '''.self_attn.k_proj''', '''.attn.value''': '''.self_attn.v_proj''', '''.attn_ln''': '''.self_attn_layer_norm''', '''.attn.out''': '''.self_attn.out_proj''', '''.cross_attn.query''': '''.encoder_attn.q_proj''', '''.cross_attn.key''': '''.encoder_attn.k_proj''', '''.cross_attn.value''': '''.encoder_attn.v_proj''', '''.cross_attn_ln''': '''.encoder_attn_layer_norm''', '''.cross_attn.out''': '''.encoder_attn.out_proj''', '''decoder.ln.''': '''decoder.layer_norm.''', '''encoder.ln.''': '''encoder.layer_norm.''', '''token_embedding''': '''embed_tokens''', '''encoder.positional_embedding''': '''encoder.embed_positions.weight''', '''decoder.positional_embedding''': '''decoder.embed_positions.weight''', '''ln_post''': '''layer_norm''', } def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]: lowerCAmelCase = list(s_dict.keys() ) for key in keys: lowerCAmelCase = key for k, v in WHISPER_MAPPING.items(): if k in key: lowerCAmelCase = new_key.replace(snake_case__ , snake_case__ ) print(f"{key} -> {new_key}" ) lowerCAmelCase = s_dict.pop(snake_case__ ) return s_dict def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]: lowerCAmelCase , lowerCAmelCase = emb.weight.shape lowerCAmelCase = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) lowerCAmelCase = emb.weight.data return lin_layer def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> bytes: os.makedirs(snake_case__ , exist_ok=snake_case__ ) 
lowerCAmelCase = os.path.basename(snake_case__ ) lowerCAmelCase = url.split('''/''' )[-2] lowerCAmelCase = os.path.join(snake_case__ , snake_case__ ) if os.path.exists(snake_case__ ) and not os.path.isfile(snake_case__ ): raise RuntimeError(f"{download_target} exists and is not a regular file" ) if os.path.isfile(snake_case__ ): lowerCAmelCase = open(snake_case__ , '''rb''' ).read() if hashlib.shaaaa(snake_case__ ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" ) with urllib.request.urlopen(snake_case__ ) as source, open(snake_case__ , '''wb''' ) as output: with tqdm( total=int(source.info().get('''Content-Length''' ) ) , ncols=8_0 , unit='''iB''' , unit_scale=snake_case__ , unit_divisor=1_0_2_4 ) as loop: while True: lowerCAmelCase = source.read(8_1_9_2 ) if not buffer: break output.write(snake_case__ ) loop.update(len(snake_case__ ) ) lowerCAmelCase = open(snake_case__ , '''rb''' ).read() if hashlib.shaaaa(snake_case__ ).hexdigest() != expected_shaaaa: raise RuntimeError( '''Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.''' ) return model_bytes def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> str: if ".pt" not in checkpoint_path: lowerCAmelCase = _download(_MODELS[checkpoint_path] ) else: lowerCAmelCase = torch.load(snake_case__ , map_location='''cpu''' ) lowerCAmelCase = original_checkpoint['''dims'''] lowerCAmelCase = original_checkpoint['''model_state_dict'''] lowerCAmelCase = state_dict['''decoder.token_embedding.weight'''] remove_ignore_keys_(snake_case__ ) rename_keys(snake_case__ ) lowerCAmelCase = True lowerCAmelCase = state_dict['''decoder.layers.0.fc1.weight'''].shape[0] lowerCAmelCase = WhisperConfig( vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=snake_case__ , decoder_ffn_dim=snake_case__ , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_head'''] , max_source_positions=dimensions['''n_audio_ctx'''] , ) lowerCAmelCase = WhisperForConditionalGeneration(snake_case__ ) lowerCAmelCase , lowerCAmelCase = model.model.load_state_dict(snake_case__ , strict=snake_case__ ) if len(snake_case__ ) > 0 and not set(snake_case__ ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,''' f" but all the following weights are missing {missing}" ) if tie_embeds: lowerCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens ) else: lowerCAmelCase = proj_out_weights model.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase__ : List[str] = argparse.ArgumentParser() # # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') lowercase__ : int = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
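A minimal invocation sketch for the converter above; the script filename on the command line is hypothetical, but the flag names, the `_MODELS` keys, and the conversion function come from the script itself:

# Download-and-convert by model name (checksum-verified via _download):
#   python convert_openai_whisper.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny-hf
# Or convert a local OpenAI checkpoint directly (a path containing ".pt" skips the download):
convert_openai_whisper_to_tfms("tiny", "./whisper-tiny-hf")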
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether the prefix of length i
    # of input_string matches the prefix of length j of the given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # a string of zero length matches a pattern of zero length
    dp[0][0] = 1

    # a pattern of zero length never matches a string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # a string of zero length matches a pattern only when every pattern
    # character is consumed by a "*" (i.e. pairs such as "x*")
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now use the bottom-up approach to fill in all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # "*" consumes zero occurrences of the preceding character
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # "*" consumes one more occurrence of the preceding character
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # inputting the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"

    # use the function to check whether the given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
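# A few extra hand-checked cases for the matcher above, following the usual
# regex-match semantics where "." matches any single character and "x*"
# matches zero or more occurrences of "x".
assert match_pattern("aab", "c*a*b")  # "c*" matches empty, "a*" matches "aa"
assert not match_pattern("aaa", "aa")  # the whole string must be consumed
assert match_pattern("ab", ".*")  # ".*" matches any string
assert not match_pattern("mississippi", "mis*is*p*.")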
from ...processing_utils import ProcessorMixin


class TvltProcessor(ProcessorMixin):
    """
    Wraps a TVLT image processor and a TVLT feature extractor into a single processor.
    """

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        # Forward `images` to the image processor and `audio` to the feature
        # extractor, then merge their outputs into a single dictionary.
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, *args, mask_pixel=mask_pixel, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, *args, is_mixed=True, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
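# Minimal usage sketch for the processor above, with illustrative inputs;
# "ZinengTang/tvlt-base" is assumed here to be a Hub checkpoint that ships
# both sub-processors, and the frame count / sampling rate are arbitrary.
import numpy as np

from transformers import TvltProcessor

processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")

video = list(np.random.randint(0, 256, (8, 3, 224, 224), dtype=np.uint8))  # 8 fake frames
audio = np.random.randn(44_100).astype(np.float32)  # one second of fake mono audio

inputs = processor(images=video, audio=audio, sampling_rate=44_100, return_tensors="pt")
print(sorted(inputs.keys()))  # expected keys such as audio_values and pixel_values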